GNU Linux-libre 6.8.7-gnu
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1 /*
2  * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/pci.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/netdevice.h>
40 #include <linux/etherdevice.h>
41 #include <linux/if_vlan.h>
42 #include <linux/mdio.h>
43 #include <linux/sockios.h>
44 #include <linux/workqueue.h>
45 #include <linux/proc_fs.h>
46 #include <linux/rtnetlink.h>
47 #include <linux/firmware.h>
48 #include <linux/log2.h>
49 #include <linux/stringify.h>
50 #include <linux/sched.h>
51 #include <linux/slab.h>
52 #include <linux/uaccess.h>
53 #include <linux/nospec.h>
54
55 #include "common.h"
56 #include "cxgb3_ioctl.h"
57 #include "regs.h"
58 #include "cxgb3_offload.h"
59 #include "version.h"
60
61 #include "cxgb3_ctl_defs.h"
62 #include "t3_cpl.h"
63 #include "firmware_exports.h"
64
65 enum {
66         MAX_TXQ_ENTRIES = 16384,
67         MAX_CTRL_TXQ_ENTRIES = 1024,
68         MAX_RSPQ_ENTRIES = 16384,
69         MAX_RX_BUFFERS = 16384,
70         MAX_RX_JUMBO_BUFFERS = 16384,
71         MIN_TXQ_ENTRIES = 4,
72         MIN_CTRL_TXQ_ENTRIES = 4,
73         MIN_RSPQ_ENTRIES = 32,
74         MIN_FL_ENTRIES = 32
75 };
76
77 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
78
79 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
80                          NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
81                          NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
82
83 #define EEPROM_MAGIC 0x38E2F10C
84
85 #define CH_DEVICE(devid, idx) \
86         { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
87
88 static const struct pci_device_id cxgb3_pci_tbl[] = {
89         CH_DEVICE(0x20, 0),     /* PE9000 */
90         CH_DEVICE(0x21, 1),     /* T302E */
91         CH_DEVICE(0x22, 2),     /* T310E */
92         CH_DEVICE(0x23, 3),     /* T320X */
93         CH_DEVICE(0x24, 1),     /* T302X */
94         CH_DEVICE(0x25, 3),     /* T320E */
95         CH_DEVICE(0x26, 2),     /* T310X */
96         CH_DEVICE(0x30, 2),     /* T3B10 */
97         CH_DEVICE(0x31, 3),     /* T3B20 */
98         CH_DEVICE(0x32, 1),     /* T3B02 */
99         CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
100         CH_DEVICE(0x36, 3),     /* S320E-CR */
101         CH_DEVICE(0x37, 7),     /* N320E-G2 */
102         {0,}
103 };
104
105 MODULE_DESCRIPTION(DRV_DESC);
106 MODULE_AUTHOR("Chelsio Communications");
107 MODULE_LICENSE("Dual BSD/GPL");
108 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
109
110 static int dflt_msg_enable = DFLT_MSG_ENABLE;
111
112 module_param(dflt_msg_enable, int, 0644);
113 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
114
115 /*
116  * The driver uses the best interrupt scheme available on a platform in the
117  * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
118  * of these schemes the driver may consider as follows:
119  *
120  * msi = 2: choose from among all three options
121  * msi = 1: only consider MSI and pin interrupts
122  * msi = 0: force pin interrupts
123  */
124 static int msi = 2;
125
126 module_param(msi, int, 0644);
127 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
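/*
 * A minimal sketch (not part of this file) of how the "msi" parameter
 * typically gates the fallback ladder described above.  The real selection
 * happens during probe; the function name and the "entries"/"nvec"
 * parameters here are hypothetical.
 */
#if 0   /* illustrative only */
static void example_pick_irq_scheme(struct adapter *adap,
                                    struct msix_entry *entries, int nvec)
{
        if (msi > 1 &&
            pci_enable_msix_range(adap->pdev, entries, nvec, nvec) > 0)
                adap->flags |= USING_MSIX;      /* msi = 2: try MSI-X first */
        else if (msi > 0 && pci_enable_msi(adap->pdev) == 0)
                adap->flags |= USING_MSI;       /* msi >= 1: fall back to MSI */
        /* otherwise legacy INTx pin interrupts are used */
}
#endif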
128
129 /*
130  * The driver enables offload as a default.
131  * To disable it, use ofld_disable = 1.
132  */
133
134 static int ofld_disable = 0;
135
136 module_param(ofld_disable, int, 0644);
137 MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
138
139 /*
140  * We have work elements that we need to cancel when an interface is taken
141  * down.  Normally the work elements would be executed by keventd but that
142  * can deadlock because of linkwatch.  If our close method takes the rtnl
143  * lock and linkwatch is ahead of our work elements in keventd, linkwatch
144  * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
145  * for our work to complete.  Get our own work queue to solve this.
146  */
147 struct workqueue_struct *cxgb3_wq;
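/*
 * A minimal sketch of the dedicated-workqueue setup motivated above; in the
 * driver the allocation is done once at module load (cxgb3_init_module).
 */
#if 0   /* illustrative only */
static int __init example_wq_init(void)
{
        cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
        if (!cxgb3_wq)
                return -ENOMEM;
        /* work queued on cxgb3_wq cannot be blocked behind linkwatch */
        return 0;
}
#endif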
148
149 /**
150  *      link_report - show link status and link speed/duplex
151  *      @dev: the port whose settings are to be reported
152  *
153  *      Shows the link status, speed, and duplex of a port.
154  */
155 static void link_report(struct net_device *dev)
156 {
157         if (!netif_carrier_ok(dev))
158                 netdev_info(dev, "link down\n");
159         else {
160                 const char *s = "10Mbps";
161                 const struct port_info *p = netdev_priv(dev);
162
163                 switch (p->link_config.speed) {
164                 case SPEED_10000:
165                         s = "10Gbps";
166                         break;
167                 case SPEED_1000:
168                         s = "1000Mbps";
169                         break;
170                 case SPEED_100:
171                         s = "100Mbps";
172                         break;
173                 }
174
175                 netdev_info(dev, "link up, %s, %s-duplex\n",
176                             s, p->link_config.duplex == DUPLEX_FULL
177                             ? "full" : "half");
178         }
179 }
180
181 static void enable_tx_fifo_drain(struct adapter *adapter,
182                                  struct port_info *pi)
183 {
184         t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
185                          F_ENDROPPKT);
186         t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
187         t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
188         t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
189 }
190
191 static void disable_tx_fifo_drain(struct adapter *adapter,
192                                   struct port_info *pi)
193 {
194         t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
195                          F_ENDROPPKT, 0);
196 }
197
198 void t3_os_link_fault(struct adapter *adap, int port_id, int state)
199 {
200         struct net_device *dev = adap->port[port_id];
201         struct port_info *pi = netdev_priv(dev);
202
203         if (state == netif_carrier_ok(dev))
204                 return;
205
206         if (state) {
207                 struct cmac *mac = &pi->mac;
208
209                 netif_carrier_on(dev);
210
211                 disable_tx_fifo_drain(adap, pi);
212
213                 /* Clear local faults */
214                 t3_xgm_intr_disable(adap, pi->port_id);
215                 t3_read_reg(adap, A_XGM_INT_STATUS +
216                                     pi->mac.offset);
217                 t3_write_reg(adap,
218                              A_XGM_INT_CAUSE + pi->mac.offset,
219                              F_XGM_INT);
220
221                 t3_set_reg_field(adap,
222                                  A_XGM_INT_ENABLE +
223                                  pi->mac.offset,
224                                  F_XGM_INT, F_XGM_INT);
225                 t3_xgm_intr_enable(adap, pi->port_id);
226
227                 t3_mac_enable(mac, MAC_DIRECTION_TX);
228         } else {
229                 netif_carrier_off(dev);
230
231                 /* Flush TX FIFO */
232                 enable_tx_fifo_drain(adap, pi);
233         }
234         link_report(dev);
235 }
236
237 /**
238  *      t3_os_link_changed - handle link status changes
239  *      @adapter: the adapter associated with the link change
240  *      @port_id: the port index whose link status has changed
241  *      @link_stat: the new status of the link
242  *      @speed: the new speed setting
243  *      @duplex: the new duplex setting
244  *      @pause: the new flow-control setting
245  *
246  *      This is the OS-dependent handler for link status changes.  The OS
247  *      neutral handler takes care of most of the processing for these events,
248  *      then calls this handler for any OS-specific processing.
249  */
250 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
251                         int speed, int duplex, int pause)
252 {
253         struct net_device *dev = adapter->port[port_id];
254         struct port_info *pi = netdev_priv(dev);
255         struct cmac *mac = &pi->mac;
256
257         /* Skip changes from disabled ports. */
258         if (!netif_running(dev))
259                 return;
260
261         if (link_stat != netif_carrier_ok(dev)) {
262                 if (link_stat) {
263                         disable_tx_fifo_drain(adapter, pi);
264
265                         t3_mac_enable(mac, MAC_DIRECTION_RX);
266
267                         /* Clear local faults */
268                         t3_xgm_intr_disable(adapter, pi->port_id);
269                         t3_read_reg(adapter, A_XGM_INT_STATUS +
270                                     pi->mac.offset);
271                         t3_write_reg(adapter,
272                                      A_XGM_INT_CAUSE + pi->mac.offset,
273                                      F_XGM_INT);
274
275                         t3_set_reg_field(adapter,
276                                          A_XGM_INT_ENABLE + pi->mac.offset,
277                                          F_XGM_INT, F_XGM_INT);
278                         t3_xgm_intr_enable(adapter, pi->port_id);
279
280                         netif_carrier_on(dev);
281                 } else {
282                         netif_carrier_off(dev);
283
284                         t3_xgm_intr_disable(adapter, pi->port_id);
285                         t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
286                         t3_set_reg_field(adapter,
287                                          A_XGM_INT_ENABLE + pi->mac.offset,
288                                          F_XGM_INT, 0);
289
290                         if (is_10G(adapter))
291                                 pi->phy.ops->power_down(&pi->phy, 1);
292
293                         t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
294                         t3_mac_disable(mac, MAC_DIRECTION_RX);
295                         t3_link_start(&pi->phy, mac, &pi->link_config);
296
297                         /* Flush TX FIFO */
298                         enable_tx_fifo_drain(adapter, pi);
299                 }
300
301                 link_report(dev);
302         }
303 }
304
305 /**
306  *      t3_os_phymod_changed - handle PHY module changes
307  *      @adap: the adapter associated with the link change
308  *      @port_id: the port index whose link status has changed
309  *
310  *      This is the OS-dependent handler for PHY module changes.  It is
311  *      invoked when a PHY module is removed or inserted for any OS-specific
312  *      processing.
313  */
314 void t3_os_phymod_changed(struct adapter *adap, int port_id)
315 {
316         static const char *mod_str[] = {
317                 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
318         };
319
320         const struct net_device *dev = adap->port[port_id];
321         const struct port_info *pi = netdev_priv(dev);
322
323         if (pi->phy.modtype == phy_modtype_none)
324                 netdev_info(dev, "PHY module unplugged\n");
325         else
326                 netdev_info(dev, "%s PHY module inserted\n",
327                             mod_str[pi->phy.modtype]);
328 }
329
330 static void cxgb_set_rxmode(struct net_device *dev)
331 {
332         struct port_info *pi = netdev_priv(dev);
333
334         t3_mac_set_rx_mode(&pi->mac, dev);
335 }
336
337 /**
338  *      link_start - enable a port
339  *      @dev: the device to enable
340  *
341  *      Performs the MAC and PHY actions needed to enable a port.
342  */
343 static void link_start(struct net_device *dev)
344 {
345         struct port_info *pi = netdev_priv(dev);
346         struct cmac *mac = &pi->mac;
347
348         t3_mac_reset(mac);
349         t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
350         t3_mac_set_mtu(mac, dev->mtu);
351         t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
352         t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
353         t3_mac_set_rx_mode(mac, dev);
354         t3_link_start(&pi->phy, mac, &pi->link_config);
355         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
356 }
357
358 static inline void cxgb_disable_msi(struct adapter *adapter)
359 {
360         if (adapter->flags & USING_MSIX) {
361                 pci_disable_msix(adapter->pdev);
362                 adapter->flags &= ~USING_MSIX;
363         } else if (adapter->flags & USING_MSI) {
364                 pci_disable_msi(adapter->pdev);
365                 adapter->flags &= ~USING_MSI;
366         }
367 }
368
369 /*
370  * Interrupt handler for asynchronous events used with MSI-X.
371  */
372 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
373 {
374         t3_slow_intr_handler(cookie);
375         return IRQ_HANDLED;
376 }
377
378 /*
379  * Name the MSI-X interrupts.
380  */
381 static void name_msix_vecs(struct adapter *adap)
382 {
383         int i, j, msi_idx = 1;
384
385         strscpy(adap->msix_info[0].desc, adap->name, sizeof(adap->msix_info[0].desc));
386
387         for_each_port(adap, j) {
388                 struct net_device *d = adap->port[j];
389                 const struct port_info *pi = netdev_priv(d);
390
391                 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
392                         snprintf(adap->msix_info[msi_idx].desc,
393                                  sizeof(adap->msix_info[0].desc),
394                                  "%s-%d", d->name, pi->first_qset + i);
395                 }
396         }
397 }
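/*
 * Example of the resulting labels, assuming two ports eth0/eth1 with two
 * queue sets each: msix_info[0] carries the adapter name (slow-path
 * events), then "eth0-0", "eth0-1", "eth1-2", "eth1-3", where the numeric
 * suffix is pi->first_qset + i.
 */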
398
399 static int request_msix_data_irqs(struct adapter *adap)
400 {
401         int i, j, err, qidx = 0;
402
403         for_each_port(adap, i) {
404                 int nqsets = adap2pinfo(adap, i)->nqsets;
405
406                 for (j = 0; j < nqsets; ++j) {
407                         err = request_irq(adap->msix_info[qidx + 1].vec,
408                                           t3_intr_handler(adap,
409                                                           adap->sge.qs[qidx].
410                                                           rspq.polling), 0,
411                                           adap->msix_info[qidx + 1].desc,
412                                           &adap->sge.qs[qidx]);
413                         if (err) {
414                                 while (--qidx >= 0)
415                                         free_irq(adap->msix_info[qidx + 1].vec,
416                                                  &adap->sge.qs[qidx]);
417                                 return err;
418                         }
419                         qidx++;
420                 }
421         }
422         return 0;
423 }
424
425 static void free_irq_resources(struct adapter *adapter)
426 {
427         if (adapter->flags & USING_MSIX) {
428                 int i, n = 0;
429
430                 free_irq(adapter->msix_info[0].vec, adapter);
431                 for_each_port(adapter, i)
432                         n += adap2pinfo(adapter, i)->nqsets;
433
434                 for (i = 0; i < n; ++i)
435                         free_irq(adapter->msix_info[i + 1].vec,
436                                  &adapter->sge.qs[i]);
437         } else
438                 free_irq(adapter->pdev->irq, adapter);
439 }
440
441 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
442                               unsigned long n)
443 {
444         int attempts = 10;
445
446         while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
447                 if (!--attempts)
448                         return -ETIMEDOUT;
449                 msleep(10);
450         }
451         return 0;
452 }
453
454 static int init_tp_parity(struct adapter *adap)
455 {
456         int i;
457         struct sk_buff *skb;
458         struct cpl_set_tcb_field *greq;
459         unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
460
461         t3_tp_set_offload_mode(adap, 1);
462
463         for (i = 0; i < 16; i++) {
464                 struct cpl_smt_write_req *req;
465
466                 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
467                 if (!skb)
468                         skb = adap->nofail_skb;
469                 if (!skb)
470                         goto alloc_skb_fail;
471
472                 req = __skb_put_zero(skb, sizeof(*req));
473                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
474                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
475                 req->mtu_idx = NMTUS - 1;
476                 req->iff = i;
477                 t3_mgmt_tx(adap, skb);
478                 if (skb == adap->nofail_skb) {
479                         await_mgmt_replies(adap, cnt, i + 1);
480                         adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
481                         if (!adap->nofail_skb)
482                                 goto alloc_skb_fail;
483                 }
484         }
485
486         for (i = 0; i < 2048; i++) {
487                 struct cpl_l2t_write_req *req;
488
489                 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
490                 if (!skb)
491                         skb = adap->nofail_skb;
492                 if (!skb)
493                         goto alloc_skb_fail;
494
495                 req = __skb_put_zero(skb, sizeof(*req));
496                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
497                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
498                 req->params = htonl(V_L2T_W_IDX(i));
499                 t3_mgmt_tx(adap, skb);
500                 if (skb == adap->nofail_skb) {
501                         await_mgmt_replies(adap, cnt, 16 + i + 1);
502                         adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
503                         if (!adap->nofail_skb)
504                                 goto alloc_skb_fail;
505                 }
506         }
507
508         for (i = 0; i < 2048; i++) {
509                 struct cpl_rte_write_req *req;
510
511                 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
512                 if (!skb)
513                         skb = adap->nofail_skb;
514                 if (!skb)
515                         goto alloc_skb_fail;
516
517                 req = __skb_put_zero(skb, sizeof(*req));
518                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
519                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
520                 req->l2t_idx = htonl(V_L2T_W_IDX(i));
521                 t3_mgmt_tx(adap, skb);
522                 if (skb == adap->nofail_skb) {
523                         await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
524                         adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
525                         if (!adap->nofail_skb)
526                                 goto alloc_skb_fail;
527                 }
528         }
529
530         skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
531         if (!skb)
532                 skb = adap->nofail_skb;
533         if (!skb)
534                 goto alloc_skb_fail;
535
536         greq = __skb_put_zero(skb, sizeof(*greq));
537         greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
538         OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
539         greq->mask = cpu_to_be64(1);
540         t3_mgmt_tx(adap, skb);
541
542         if (skb == adap->nofail_skb) {
543                 await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
544                 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
545         }
546         i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
547
548         t3_tp_set_offload_mode(adap, 0);
549         return i;
550
551 alloc_skb_fail:
552         t3_tp_set_offload_mode(adap, 0);
553         return -ENOMEM;
554 }
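/*
 * Reply accounting for the primer above: 16 SMT writes + 2048 L2T writes +
 * 2048 RTE writes + 1 SET_TCB_FIELD = 4113 management replies, which is why
 * the final wait uses the count 16 + 2048 + 2048 + 1.
 */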
555
556 /**
557  *      setup_rss - configure RSS
558  *      @adap: the adapter
559  *
560  *      Sets up RSS to distribute packets to multiple receive queues.  We
561  *      configure the RSS CPU lookup table to distribute to the number of HW
562  *      receive queues, and the response queue lookup table to narrow that
563  *      down to the response queues actually configured for each port.
564  *      We always configure the RSS mapping for two ports since the mapping
565  *      table has plenty of entries.
566  */
567 static void setup_rss(struct adapter *adap)
568 {
569         int i;
570         unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
571         unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
572         u8 cpus[SGE_QSETS + 1];
573         u16 rspq_map[RSS_TABLE_SIZE + 1];
574
575         for (i = 0; i < SGE_QSETS; ++i)
576                 cpus[i] = i;
577         cpus[SGE_QSETS] = 0xff; /* terminator */
578
579         for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
580                 rspq_map[i] = i % nq0;
581                 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
582         }
583         rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */
584
585         t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
586                       F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
587                       V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
588 }
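/*
 * Worked example of the mapping above, assuming nq0 = nq1 = 2: the lower
 * half of rspq_map alternates 0,1,0,1,... (port 0's queue sets) and the
 * upper half alternates 2,3,2,3,... (port 1's queue sets, offset by nq0),
 * so a hash landing in either half stays on the owning port.
 */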
589
590 static void ring_dbs(struct adapter *adap)
591 {
592         int i, j;
593
594         for (i = 0; i < SGE_QSETS; i++) {
595                 struct sge_qset *qs = &adap->sge.qs[i];
596
597                 if (qs->adap)
598                         for (j = 0; j < SGE_TXQ_PER_SET; j++)
599                                 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
600         }
601 }
602
603 static void init_napi(struct adapter *adap)
604 {
605         int i;
606
607         for (i = 0; i < SGE_QSETS; i++) {
608                 struct sge_qset *qs = &adap->sge.qs[i];
609
610                 if (qs->adap)
611                         netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll);
612         }
613
614         /*
615          * netif_napi_add() can be called only once per napi_struct because it
616          * adds each new napi_struct to a list.  Be careful not to call it a
617          * second time, e.g., during EEH recovery, by making a note of it.
618          */
619         adap->flags |= NAPI_INIT;
620 }
621
622 /*
623  * Wait until all NAPI handlers are descheduled.  This includes the handlers of
624  * both netdevices representing interfaces and the dummy ones for the extra
625  * queues.
626  */
627 static void quiesce_rx(struct adapter *adap)
628 {
629         int i;
630
631         for (i = 0; i < SGE_QSETS; i++)
632                 if (adap->sge.qs[i].adap)
633                         napi_disable(&adap->sge.qs[i].napi);
634 }
635
636 static void enable_all_napi(struct adapter *adap)
637 {
638         int i;
639         for (i = 0; i < SGE_QSETS; i++)
640                 if (adap->sge.qs[i].adap)
641                         napi_enable(&adap->sge.qs[i].napi);
642 }
643
644 /**
645  *      setup_sge_qsets - configure SGE Tx/Rx/response queues
646  *      @adap: the adapter
647  *
648  *      Determines how many sets of SGE queues to use and initializes them.
649  *      We support multiple queue sets per port if we have MSI-X, otherwise
650  *      just one queue set per port.
651  */
652 static int setup_sge_qsets(struct adapter *adap)
653 {
654         int i, j, err, irq_idx = 0, qset_idx = 0;
655         unsigned int ntxq = SGE_TXQ_PER_SET;
656
657         if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
658                 irq_idx = -1;
659
660         for_each_port(adap, i) {
661                 struct net_device *dev = adap->port[i];
662                 struct port_info *pi = netdev_priv(dev);
663
664                 pi->qs = &adap->sge.qs[pi->first_qset];
665                 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
666                         err = t3_sge_alloc_qset(adap, qset_idx, 1,
667                                 (adap->flags & USING_MSIX) ? qset_idx + 1 :
668                                                              irq_idx,
669                                 &adap->params.sge.qset[qset_idx], ntxq, dev,
670                                 netdev_get_tx_queue(dev, j));
671                         if (err) {
672                                 t3_free_sge_resources(adap);
673                                 return err;
674                         }
675                 }
676         }
677
678         return 0;
679 }
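/*
 * Note the MSI-X indexing above: vector 0 is reserved for slow-path events
 * (see cxgb_up()), so data queue set n is attached to vector n + 1.
 */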
680
681 static ssize_t attr_show(struct device *d, char *buf,
682                          ssize_t(*format) (struct net_device *, char *))
683 {
684         ssize_t len;
685
686         /* Synchronize with ioctls that may shut down the device */
687         rtnl_lock();
688         len = (*format) (to_net_dev(d), buf);
689         rtnl_unlock();
690         return len;
691 }
692
693 static ssize_t attr_store(struct device *d,
694                           const char *buf, size_t len,
695                           ssize_t(*set) (struct net_device *, unsigned int),
696                           unsigned int min_val, unsigned int max_val)
697 {
698         ssize_t ret;
699         unsigned int val;
700
701         if (!capable(CAP_NET_ADMIN))
702                 return -EPERM;
703
704         ret = kstrtouint(buf, 0, &val);
705         if (ret)
706                 return ret;
707         if (val < min_val || val > max_val)
708                 return -EINVAL;
709
710         rtnl_lock();
711         ret = (*set) (to_net_dev(d), val);
712         if (!ret)
713                 ret = len;
714         rtnl_unlock();
715         return ret;
716 }
717
718 #define CXGB3_SHOW(name, val_expr) \
719 static ssize_t format_##name(struct net_device *dev, char *buf) \
720 { \
721         struct port_info *pi = netdev_priv(dev); \
722         struct adapter *adap = pi->adapter; \
723         return sprintf(buf, "%u\n", val_expr); \
724 } \
725 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
726                            char *buf) \
727 { \
728         return attr_show(d, buf, format_##name); \
729 }
730
731 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
732 {
733         struct port_info *pi = netdev_priv(dev);
734         struct adapter *adap = pi->adapter;
735         int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
736
737         if (adap->flags & FULL_INIT_DONE)
738                 return -EBUSY;
739         if (val && adap->params.rev == 0)
740                 return -EINVAL;
741         if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
742             min_tids)
743                 return -EINVAL;
744         adap->params.mc5.nfilters = val;
745         return 0;
746 }
747
748 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
749                               const char *buf, size_t len)
750 {
751         return attr_store(d, buf, len, set_nfilters, 0, ~0);
752 }
753
754 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
755 {
756         struct port_info *pi = netdev_priv(dev);
757         struct adapter *adap = pi->adapter;
758
759         if (adap->flags & FULL_INIT_DONE)
760                 return -EBUSY;
761         if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
762             MC5_MIN_TIDS)
763                 return -EINVAL;
764         adap->params.mc5.nservers = val;
765         return 0;
766 }
767
768 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
769                               const char *buf, size_t len)
770 {
771         return attr_store(d, buf, len, set_nservers, 0, ~0);
772 }
773
774 #define CXGB3_ATTR_R(name, val_expr) \
775 CXGB3_SHOW(name, val_expr) \
776 static DEVICE_ATTR(name, 0444, show_##name, NULL)
777
778 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
779 CXGB3_SHOW(name, val_expr) \
780 static DEVICE_ATTR(name, 0644, show_##name, store_method)
781
782 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
783 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
784 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
785
786 static struct attribute *cxgb3_attrs[] = {
787         &dev_attr_cam_size.attr,
788         &dev_attr_nfilters.attr,
789         &dev_attr_nservers.attr,
790         NULL
791 };
792
793 static const struct attribute_group cxgb3_attr_group = {
794         .attrs = cxgb3_attrs,
795 };
796
797 static ssize_t tm_attr_show(struct device *d,
798                             char *buf, int sched)
799 {
800         struct port_info *pi = netdev_priv(to_net_dev(d));
801         struct adapter *adap = pi->adapter;
802         unsigned int v, addr, bpt, cpt;
803         ssize_t len;
804
805         addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
806         rtnl_lock();
807         t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
808         v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
809         if (sched & 1)
810                 v >>= 16;
811         bpt = (v >> 8) & 0xff;
812         cpt = v & 0xff;
813         if (!cpt)
814                 len = sprintf(buf, "disabled\n");
815         else {
816                 v = (adap->params.vpd.cclk * 1000) / cpt;
817                 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
818         }
819         rtnl_unlock();
820         return len;
821 }
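/*
 * Worked example of the conversion above, assuming cclk is in kHz: with a
 * hypothetical cclk of 200000 (200 MHz), cpt = 100 and bpt = 64,
 * v = 200000000 / 100 = 2000000 refills/sec and
 * 2000000 * 64 / 125 = 1024000 Kbps, i.e. roughly 1 Gbps.
 */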
822
823 static ssize_t tm_attr_store(struct device *d,
824                              const char *buf, size_t len, int sched)
825 {
826         struct port_info *pi = netdev_priv(to_net_dev(d));
827         struct adapter *adap = pi->adapter;
828         unsigned int val;
829         ssize_t ret;
830
831         if (!capable(CAP_NET_ADMIN))
832                 return -EPERM;
833
834         ret = kstrtouint(buf, 0, &val);
835         if (ret)
836                 return ret;
837         if (val > 10000000)
838                 return -EINVAL;
839
840         rtnl_lock();
841         ret = t3_config_sched(adap, val, sched);
842         if (!ret)
843                 ret = len;
844         rtnl_unlock();
845         return ret;
846 }
847
848 #define TM_ATTR(name, sched) \
849 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
850                            char *buf) \
851 { \
852         return tm_attr_show(d, buf, sched); \
853 } \
854 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
855                             const char *buf, size_t len) \
856 { \
857         return tm_attr_store(d, buf, len, sched); \
858 } \
859 static DEVICE_ATTR(name, 0644, show_##name, store_##name)
860
861 TM_ATTR(sched0, 0);
862 TM_ATTR(sched1, 1);
863 TM_ATTR(sched2, 2);
864 TM_ATTR(sched3, 3);
865 TM_ATTR(sched4, 4);
866 TM_ATTR(sched5, 5);
867 TM_ATTR(sched6, 6);
868 TM_ATTR(sched7, 7);
869
870 static struct attribute *offload_attrs[] = {
871         &dev_attr_sched0.attr,
872         &dev_attr_sched1.attr,
873         &dev_attr_sched2.attr,
874         &dev_attr_sched3.attr,
875         &dev_attr_sched4.attr,
876         &dev_attr_sched5.attr,
877         &dev_attr_sched6.attr,
878         &dev_attr_sched7.attr,
879         NULL
880 };
881
882 static const struct attribute_group offload_attr_group = {
883         .attrs = offload_attrs,
884 };
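/*
 * These attributes hang off the port's netdev (see offload_open()), so a
 * scheduler's rate can be read or set through sysfs, e.g. via a path like
 * /sys/class/net/<iface>/sched0; values are in Kbps, matching what
 * tm_attr_show() prints.
 */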
885
886 /*
887  * Sends an sk_buff to an offload queue driver
888  * after dealing with any active network taps.
889  */
890 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
891 {
892         int ret;
893
894         local_bh_disable();
895         ret = t3_offload_tx(tdev, skb);
896         local_bh_enable();
897         return ret;
898 }
899
900 static int write_smt_entry(struct adapter *adapter, int idx)
901 {
902         struct cpl_smt_write_req *req;
903         struct port_info *pi = netdev_priv(adapter->port[idx]);
904         struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
905
906         if (!skb)
907                 return -ENOMEM;
908
909         req = __skb_put(skb, sizeof(*req));
910         req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
911         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
912         req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
913         req->iff = idx;
914         memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
915         memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
916         skb->priority = 1;
917         offload_tx(&adapter->tdev, skb);
918         return 0;
919 }
920
921 static int init_smt(struct adapter *adapter)
922 {
923         int i;
924
925         for_each_port(adapter, i)
926             write_smt_entry(adapter, i);
927         return 0;
928 }
929
930 static void init_port_mtus(struct adapter *adapter)
931 {
932         unsigned int mtus = adapter->port[0]->mtu;
933
934         if (adapter->port[1])
935                 mtus |= adapter->port[1]->mtu << 16;
936         t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
937 }
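/*
 * Example of the packing above: with port 0 at MTU 1500 (0x5dc) and port 1
 * at MTU 9000 (0x2328), A_TP_MTU_PORT_TABLE is written with 0x232805dc,
 * port 1 in the upper 16 bits and port 0 in the lower.
 */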
938
939 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
940                               int hi, int port)
941 {
942         struct sk_buff *skb;
943         struct mngt_pktsched_wr *req;
944         int ret;
945
946         skb = alloc_skb(sizeof(*req), GFP_KERNEL);
947         if (!skb)
948                 skb = adap->nofail_skb;
949         if (!skb)
950                 return -ENOMEM;
951
952         req = skb_put(skb, sizeof(*req));
953         req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
954         req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
955         req->sched = sched;
956         req->idx = qidx;
957         req->min = lo;
958         req->max = hi;
959         req->binding = port;
960         ret = t3_mgmt_tx(adap, skb);
961         if (skb == adap->nofail_skb) {
962                 adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
963                                              GFP_KERNEL);
964                 if (!adap->nofail_skb)
965                         ret = -ENOMEM;
966         }
967
968         return ret;
969 }
970
971 static int bind_qsets(struct adapter *adap)
972 {
973         int i, j, err = 0;
974
975         for_each_port(adap, i) {
976                 const struct port_info *pi = adap2pinfo(adap, i);
977
978                 for (j = 0; j < pi->nqsets; ++j) {
979                         int ret = send_pktsched_cmd(adap, 1,
980                                                     pi->first_qset + j, -1,
981                                                     -1, i);
982                         if (ret)
983                                 err = ret;
984                 }
985         }
986
987         return err;
988 }
989
990 /*(DEBLOBBED)*/
991 #define FW_FNAME "/*(DEBLOBBED)*/"
992 /*(DEBLOBBED)*/
993 #define TPSRAM_NAME "/*(DEBLOBBED)*/"
994 #define AEL2005_OPT_EDC_NAME "/*(DEBLOBBED)*/"
995 #define AEL2005_TWX_EDC_NAME "/*(DEBLOBBED)*/"
996 #define AEL2020_TWX_EDC_NAME "/*(DEBLOBBED)*/"
997 /*(DEBLOBBED)*/
998
999 static inline const char *get_edc_fw_name(int edc_idx)
1000 {
1001         const char *fw_name = NULL;
1002
1003         switch (edc_idx) {
1004         case EDC_OPT_AEL2005:
1005                 fw_name = AEL2005_OPT_EDC_NAME;
1006                 break;
1007         case EDC_TWX_AEL2005:
1008                 fw_name = AEL2005_TWX_EDC_NAME;
1009                 break;
1010         case EDC_TWX_AEL2020:
1011                 fw_name = AEL2020_TWX_EDC_NAME;
1012                 break;
1013         }
1014         return fw_name;
1015 }
1016
1017 int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
1018 {
1019         struct adapter *adapter = phy->adapter;
1020         const struct firmware *fw;
1021         const char *fw_name;
1022         u32 csum;
1023         const __be32 *p;
1024         u16 *cache = phy->phy_cache;
1025         int i, ret = -EINVAL;
1026
1027         fw_name = get_edc_fw_name(edc_idx);
1028         if (fw_name)
1029                 ret = reject_firmware(&fw, fw_name, &adapter->pdev->dev);
1030         if (ret < 0) {
1031                 dev_err(&adapter->pdev->dev,
1032                         "could not upgrade firmware: unable to load %s\n",
1033                         fw_name);
1034                 return ret;
1035         }
1036
1037         /* check size, take checksum in account */
1038         if (fw->size > size + 4) {
1039                 CH_ERR(adapter, "firmware image too large %u, expected %d\n",
1040                        (unsigned int)fw->size, size + 4);
1041                 ret = -EINVAL;
1042         }
1043
1044         /* compute checksum */
1045         p = (const __be32 *)fw->data;
1046         for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
1047                 csum += ntohl(p[i]);
1048
1049         if (csum != 0xffffffff) {
1050                 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1051                        csum);
1052                 ret = -EINVAL;
1053         }
1054
1055         for (i = 0; i < size / 4 ; i++) {
1056                 *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
1057                 *cache++ = be32_to_cpu(p[i]) & 0xffff;
1058         }
1059
1060         release_firmware(fw);
1061
1062         return ret;
1063 }
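/*
 * The checksum convention used above: the big-endian words of a valid
 * image, including the trailing checksum word, sum (mod 2^32) to
 * 0xffffffff, so the checksum word is 0xffffffff minus the sum of the
 * payload words.
 */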
1064
1065 static int upgrade_fw(struct adapter *adap)
1066 {
1067         int ret;
1068         const struct firmware *fw;
1069         struct device *dev = &adap->pdev->dev;
1070
1071         ret = reject_firmware(&fw, FW_FNAME, dev);
1072         if (ret < 0) {
1073                 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
1074                         FW_FNAME);
1075                 return ret;
1076         }
1077         ret = t3_load_fw(adap, fw->data, fw->size);
1078         release_firmware(fw);
1079
1080         if (ret == 0)
1081                 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
1082                          FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
1083         else
1084                 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
1085                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
1086
1087         return ret;
1088 }
1089
1090 static inline char t3rev2char(struct adapter *adapter)
1091 {
1092         char rev = 0;
1093
1094         switch(adapter->params.rev) {
1095         case T3_REV_B:
1096         case T3_REV_B2:
1097                 rev = 'b';
1098                 break;
1099         case T3_REV_C:
1100                 rev = 'c';
1101                 break;
1102         }
1103         return rev;
1104 }
1105
1106 static int update_tpsram(struct adapter *adap)
1107 {
1108         const struct firmware *tpsram;
1109         char buf[64];
1110         struct device *dev = &adap->pdev->dev;
1111         int ret;
1112         char rev;
1113
1114         rev = t3rev2char(adap);
1115         if (!rev)
1116                 return 0;
1117
1118         snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);
1119
1120         ret = reject_firmware(&tpsram, buf, dev);
1121         if (ret < 0) {
1122                 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
1123                         buf);
1124                 return ret;
1125         }
1126
1127         ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
1128         if (ret)
1129                 goto release_tpsram;
1130
1131         ret = t3_set_proto_sram(adap, tpsram->data);
1132         if (ret == 0)
1133                 dev_info(dev,
1134                          "successful update of protocol engine "
1135                          "to %d.%d.%d\n",
1136                          TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1137         else
1138                 dev_err(dev, "failed update of protocol engine %d.%d.%d\n",
1139                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1140         if (ret)
1141                 dev_err(dev, "loading protocol SRAM failed\n");
1142
1143 release_tpsram:
1144         release_firmware(tpsram);
1145
1146         return ret;
1147 }
1148
1149 /**
1150  * t3_synchronize_rx - wait for current Rx processing on a port to complete
1151  * @adap: the adapter
1152  * @p: the port
1153  *
1154  * Ensures that current Rx processing on any of the queues associated with
1155  * the given port completes before returning.  We do this by acquiring and
1156  * releasing the locks of the response queues associated with the port.
1157  */
1158 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
1159 {
1160         int i;
1161
1162         for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1163                 struct sge_rspq *q = &adap->sge.qs[i].rspq;
1164
1165                 spin_lock_irq(&q->lock);
1166                 spin_unlock_irq(&q->lock);
1167         }
1168 }
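/*
 * Taking and immediately releasing each response-queue lock acts as a
 * barrier: any Rx handler that held the lock when this function was
 * entered must have finished by the time spin_lock_irq() returns.
 */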
1169
1170 static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
1171 {
1172         struct port_info *pi = netdev_priv(dev);
1173         struct adapter *adapter = pi->adapter;
1174
1175         if (adapter->params.rev > 0) {
1176                 t3_set_vlan_accel(adapter, 1 << pi->port_id,
1177                                   features & NETIF_F_HW_VLAN_CTAG_RX);
1178         } else {
1179                 /* single control for all ports */
1180                 unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;
1181
1182                 for_each_port(adapter, i)
1183                         have_vlans |=
1184                                 adapter->port[i]->features &
1185                                 NETIF_F_HW_VLAN_CTAG_RX;
1186
1187                 t3_set_vlan_accel(adapter, 1, have_vlans);
1188         }
1189         t3_synchronize_rx(adapter, pi);
1190 }
1191
1192 /**
1193  *      cxgb_up - enable the adapter
1194  *      @adap: adapter being enabled
1195  *
1196  *      Called when the first port is enabled, this function performs the
1197  *      actions necessary to make an adapter operational, such as completing
1198  *      the initialization of HW modules, and enabling interrupts.
1199  *
1200  *      Must be called with the rtnl lock held.
1201  */
1202 static int cxgb_up(struct adapter *adap)
1203 {
1204         int i, err;
1205
1206         if (!(adap->flags & FULL_INIT_DONE)) {
1207                 err = t3_check_fw_version(adap);
1208                 if (err == -EINVAL) {
1209                         err = upgrade_fw(adap);
1210                         CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
1211                                 FW_VERSION_MAJOR, FW_VERSION_MINOR,
1212                                 FW_VERSION_MICRO, err ? "failed" : "succeeded");
1213                 }
1214
1215                 err = t3_check_tpsram_version(adap);
1216                 if (err == -EINVAL) {
1217                         err = update_tpsram(adap);
1218                         CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
1219                                 TP_VERSION_MAJOR, TP_VERSION_MINOR,
1220                                 TP_VERSION_MICRO, err ? "failed" : "succeeded");
1221                 }
1222
1223                 /*
1224                  * Clear interrupts now to catch errors if t3_init_hw fails.
1225                  * We clear them again later as initialization may trigger
1226                  * conditions that can interrupt.
1227                  */
1228                 t3_intr_clear(adap);
1229
1230                 err = t3_init_hw(adap, 0);
1231                 if (err)
1232                         goto out;
1233
1234                 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
1235                 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1236
1237                 err = setup_sge_qsets(adap);
1238                 if (err)
1239                         goto out;
1240
1241                 for_each_port(adap, i)
1242                         cxgb_vlan_mode(adap->port[i], adap->port[i]->features);
1243
1244                 setup_rss(adap);
1245                 if (!(adap->flags & NAPI_INIT))
1246                         init_napi(adap);
1247
1248                 t3_start_sge_timers(adap);
1249                 adap->flags |= FULL_INIT_DONE;
1250         }
1251
1252         t3_intr_clear(adap);
1253
1254         if (adap->flags & USING_MSIX) {
1255                 name_msix_vecs(adap);
1256                 err = request_irq(adap->msix_info[0].vec,
1257                                   t3_async_intr_handler, 0,
1258                                   adap->msix_info[0].desc, adap);
1259                 if (err)
1260                         goto irq_err;
1261
1262                 err = request_msix_data_irqs(adap);
1263                 if (err) {
1264                         free_irq(adap->msix_info[0].vec, adap);
1265                         goto irq_err;
1266                 }
1267         } else {
1268                 err = request_irq(adap->pdev->irq,
1269                                   t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
1270                                   (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
1271                                   adap->name, adap);
1272                 if (err)
1273                         goto irq_err;
1274         }
1275
1276         enable_all_napi(adap);
1277         t3_sge_start(adap);
1278         t3_intr_enable(adap);
1279
1280         if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1281             is_offload(adap) && init_tp_parity(adap) == 0)
1282                 adap->flags |= TP_PARITY_INIT;
1283
1284         if (adap->flags & TP_PARITY_INIT) {
1285                 t3_write_reg(adap, A_TP_INT_CAUSE,
1286                              F_CMCACHEPERR | F_ARPLUTPERR);
1287                 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
1288         }
1289
1290         if (!(adap->flags & QUEUES_BOUND)) {
1291                 int ret = bind_qsets(adap);
1292
1293                 if (ret < 0) {
1294                         CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
1295                         t3_intr_disable(adap);
1296                         quiesce_rx(adap);
1297                         free_irq_resources(adap);
1298                         err = ret;
1299                         goto out;
1300                 }
1301                 adap->flags |= QUEUES_BOUND;
1302         }
1303
1304 out:
1305         return err;
1306 irq_err:
1307         CH_ERR(adap, "request_irq failed, err %d\n", err);
1308         goto out;
1309 }
1310
1311 /*
1312  * Release resources when all the ports and offloading have been stopped.
1313  */
1314 static void cxgb_down(struct adapter *adapter, int on_wq)
1315 {
1316         t3_sge_stop(adapter);
1317         spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
1318         t3_intr_disable(adapter);
1319         spin_unlock_irq(&adapter->work_lock);
1320
1321         free_irq_resources(adapter);
1322         quiesce_rx(adapter);
1323         t3_sge_stop(adapter);
1324         if (!on_wq)
1325                 flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */
1326 }
1327
1328 static void schedule_chk_task(struct adapter *adap)
1329 {
1330         unsigned int timeo;
1331
1332         timeo = adap->params.linkpoll_period ?
1333             (HZ * adap->params.linkpoll_period) / 10 :
1334             adap->params.stats_update_period * HZ;
1335         if (timeo)
1336                 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1337 }
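/*
 * Given the HZ * n / 10 scaling above, a linkpoll_period of 5 appears to
 * mean tenths of a second, i.e. the check task runs every 500 ms; with
 * link polling disabled it falls back to stats_update_period, in whole
 * seconds.
 */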
1338
1339 static int offload_open(struct net_device *dev)
1340 {
1341         struct port_info *pi = netdev_priv(dev);
1342         struct adapter *adapter = pi->adapter;
1343         struct t3cdev *tdev = dev2t3cdev(dev);
1344         int adap_up = adapter->open_device_map & PORT_MASK;
1345         int err;
1346
1347         if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1348                 return 0;
1349
1350         if (!adap_up && (err = cxgb_up(adapter)) < 0)
1351                 goto out;
1352
1353         t3_tp_set_offload_mode(adapter, 1);
1354         tdev->lldev = adapter->port[0];
1355         err = cxgb3_offload_activate(adapter);
1356         if (err)
1357                 goto out;
1358
1359         init_port_mtus(adapter);
1360         t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1361                      adapter->params.b_wnd,
1362                      adapter->params.rev == 0 ?
1363                      adapter->port[0]->mtu : 0xffff);
1364         init_smt(adapter);
1365
1366         if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1367                 dev_dbg(&dev->dev, "cannot create sysfs group\n");
1368
1369         /* Call back all registered clients */
1370         cxgb3_add_clients(tdev);
1371
1372 out:
1373         /* restore them in case the offload module has changed them */
1374         if (err) {
1375                 t3_tp_set_offload_mode(adapter, 0);
1376                 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1377                 cxgb3_set_dummy_ops(tdev);
1378         }
1379         return err;
1380 }
1381
1382 static int offload_close(struct t3cdev *tdev)
1383 {
1384         struct adapter *adapter = tdev2adap(tdev);
1385         struct t3c_data *td = T3C_DATA(tdev);
1386
1387         if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1388                 return 0;
1389
1390         /* Call back all registered clients */
1391         cxgb3_remove_clients(tdev);
1392
1393         sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1394
1395         /* Flush work scheduled while releasing TIDs */
1396         flush_work(&td->tid_release_task);
1397
1398         tdev->lldev = NULL;
1399         cxgb3_set_dummy_ops(tdev);
1400         t3_tp_set_offload_mode(adapter, 0);
1401         clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1402
1403         if (!adapter->open_device_map)
1404                 cxgb_down(adapter, 0);
1405
1406         cxgb3_offload_deactivate(adapter);
1407         return 0;
1408 }
1409
1410 static int cxgb_open(struct net_device *dev)
1411 {
1412         struct port_info *pi = netdev_priv(dev);
1413         struct adapter *adapter = pi->adapter;
1414         int other_ports = adapter->open_device_map & PORT_MASK;
1415         int err;
1416
1417         if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1418                 return err;
1419
1420         set_bit(pi->port_id, &adapter->open_device_map);
1421         if (is_offload(adapter) && !ofld_disable) {
1422                 err = offload_open(dev);
1423                 if (err)
1424                         pr_warn("Could not initialize offload capabilities\n");
1425         }
1426
1427         netif_set_real_num_tx_queues(dev, pi->nqsets);
1428         err = netif_set_real_num_rx_queues(dev, pi->nqsets);
1429         if (err)
1430                 return err;
1431         link_start(dev);
1432         t3_port_intr_enable(adapter, pi->port_id);
1433         netif_tx_start_all_queues(dev);
1434         if (!other_ports)
1435                 schedule_chk_task(adapter);
1436
1437         cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
1438         return 0;
1439 }
1440
1441 static int __cxgb_close(struct net_device *dev, int on_wq)
1442 {
1443         struct port_info *pi = netdev_priv(dev);
1444         struct adapter *adapter = pi->adapter;
1445
1446
1447         if (!adapter->open_device_map)
1448                 return 0;
1449
1450         /* Stop link fault interrupts */
1451         t3_xgm_intr_disable(adapter, pi->port_id);
1452         t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
1453
1454         t3_port_intr_disable(adapter, pi->port_id);
1455         netif_tx_stop_all_queues(dev);
1456         pi->phy.ops->power_down(&pi->phy, 1);
1457         netif_carrier_off(dev);
1458         t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1459
1460         spin_lock_irq(&adapter->work_lock);     /* sync with update task */
1461         clear_bit(pi->port_id, &adapter->open_device_map);
1462         spin_unlock_irq(&adapter->work_lock);
1463
1464         if (!(adapter->open_device_map & PORT_MASK))
1465                 cancel_delayed_work_sync(&adapter->adap_check_task);
1466
1467         if (!adapter->open_device_map)
1468                 cxgb_down(adapter, on_wq);
1469
1470         cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
1471         return 0;
1472 }
1473
1474 static int cxgb_close(struct net_device *dev)
1475 {
1476         return __cxgb_close(dev, 0);
1477 }
1478
1479 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1480 {
1481         struct port_info *pi = netdev_priv(dev);
1482         struct adapter *adapter = pi->adapter;
1483         struct net_device_stats *ns = &dev->stats;
1484         const struct mac_stats *pstats;
1485
1486         spin_lock(&adapter->stats_lock);
1487         pstats = t3_mac_update_stats(&pi->mac);
1488         spin_unlock(&adapter->stats_lock);
1489
1490         ns->tx_bytes = pstats->tx_octets;
1491         ns->tx_packets = pstats->tx_frames;
1492         ns->rx_bytes = pstats->rx_octets;
1493         ns->rx_packets = pstats->rx_frames;
1494         ns->multicast = pstats->rx_mcast_frames;
1495
1496         ns->tx_errors = pstats->tx_underrun;
1497         ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1498             pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1499             pstats->rx_fifo_ovfl;
1500
1501         /* detailed rx_errors */
1502         ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1503         ns->rx_over_errors = 0;
1504         ns->rx_crc_errors = pstats->rx_fcs_errs;
1505         ns->rx_frame_errors = pstats->rx_symbol_errs;
1506         ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1507         ns->rx_missed_errors = pstats->rx_cong_drops;
1508
1509         /* detailed tx_errors */
1510         ns->tx_aborted_errors = 0;
1511         ns->tx_carrier_errors = 0;
1512         ns->tx_fifo_errors = pstats->tx_underrun;
1513         ns->tx_heartbeat_errors = 0;
1514         ns->tx_window_errors = 0;
1515         return ns;
1516 }
1517
1518 static u32 get_msglevel(struct net_device *dev)
1519 {
1520         struct port_info *pi = netdev_priv(dev);
1521         struct adapter *adapter = pi->adapter;
1522
1523         return adapter->msg_enable;
1524 }
1525
1526 static void set_msglevel(struct net_device *dev, u32 val)
1527 {
1528         struct port_info *pi = netdev_priv(dev);
1529         struct adapter *adapter = pi->adapter;
1530
1531         adapter->msg_enable = val;
1532 }
1533
1534 static const char stats_strings[][ETH_GSTRING_LEN] = {
1535         "TxOctetsOK         ",
1536         "TxFramesOK         ",
1537         "TxMulticastFramesOK",
1538         "TxBroadcastFramesOK",
1539         "TxPauseFrames      ",
1540         "TxUnderrun         ",
1541         "TxExtUnderrun      ",
1542
1543         "TxFrames64         ",
1544         "TxFrames65To127    ",
1545         "TxFrames128To255   ",
1546         "TxFrames256To511   ",
1547         "TxFrames512To1023  ",
1548         "TxFrames1024To1518 ",
1549         "TxFrames1519ToMax  ",
1550
1551         "RxOctetsOK         ",
1552         "RxFramesOK         ",
1553         "RxMulticastFramesOK",
1554         "RxBroadcastFramesOK",
1555         "RxPauseFrames      ",
1556         "RxFCSErrors        ",
1557         "RxSymbolErrors     ",
1558         "RxShortErrors      ",
1559         "RxJabberErrors     ",
1560         "RxLengthErrors     ",
1561         "RxFIFOoverflow     ",
1562
1563         "RxFrames64         ",
1564         "RxFrames65To127    ",
1565         "RxFrames128To255   ",
1566         "RxFrames256To511   ",
1567         "RxFrames512To1023  ",
1568         "RxFrames1024To1518 ",
1569         "RxFrames1519ToMax  ",
1570
1571         "PhyFIFOErrors      ",
1572         "TSO                ",
1573         "VLANextractions    ",
1574         "VLANinsertions     ",
1575         "TxCsumOffload      ",
1576         "RxCsumGood         ",
1577         "LroAggregated      ",
1578         "LroFlushed         ",
1579         "LroNoDesc          ",
1580         "RxDrops            ",
1581
1582         "CheckTXEnToggled   ",
1583         "CheckResets        ",
1584
1585         "LinkFaults         ",
1586 };
1587
1588 static int get_sset_count(struct net_device *dev, int sset)
1589 {
1590         switch (sset) {
1591         case ETH_SS_STATS:
1592                 return ARRAY_SIZE(stats_strings);
1593         default:
1594                 return -EOPNOTSUPP;
1595         }
1596 }
1597
1598 #define T3_REGMAP_SIZE (3 * 1024)
1599
1600 static int get_regs_len(struct net_device *dev)
1601 {
1602         return T3_REGMAP_SIZE;
1603 }
1604
1605 static int get_eeprom_len(struct net_device *dev)
1606 {
1607         return EEPROMSIZE;
1608 }
1609
1610 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1611 {
1612         struct port_info *pi = netdev_priv(dev);
1613         struct adapter *adapter = pi->adapter;
1614         u32 fw_vers = 0;
1615         u32 tp_vers = 0;
1616
1617         spin_lock(&adapter->stats_lock);
1618         t3_get_fw_version(adapter, &fw_vers);
1619         t3_get_tp_version(adapter, &tp_vers);
1620         spin_unlock(&adapter->stats_lock);
1621
1622         strscpy(info->driver, DRV_NAME, sizeof(info->driver));
1623         strscpy(info->bus_info, pci_name(adapter->pdev),
1624                 sizeof(info->bus_info));
1625         if (fw_vers)
1626                 snprintf(info->fw_version, sizeof(info->fw_version),
1627                          "%s %u.%u.%u TP %u.%u.%u",
1628                          G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1629                          G_FW_VERSION_MAJOR(fw_vers),
1630                          G_FW_VERSION_MINOR(fw_vers),
1631                          G_FW_VERSION_MICRO(fw_vers),
1632                          G_TP_VERSION_MAJOR(tp_vers),
1633                          G_TP_VERSION_MINOR(tp_vers),
1634                          G_TP_VERSION_MICRO(tp_vers));
1635 }
1636
1637 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1638 {
1639         if (stringset == ETH_SS_STATS)
1640                 memcpy(data, stats_strings, sizeof(stats_strings));
1641 }
1642
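/*
 * Sum the SGE statistic with index @idx across all the queue sets
 * owned by the given port.
 */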
1643 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1644                                             struct port_info *p, int idx)
1645 {
1646         int i;
1647         unsigned long tot = 0;
1648
1649         for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1650                 tot += adapter->sge.qs[i].port_stats[idx];
1651         return tot;
1652 }
1653
1654 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1655                       u64 *data)
1656 {
1657         struct port_info *pi = netdev_priv(dev);
1658         struct adapter *adapter = pi->adapter;
1659         const struct mac_stats *s;
1660
1661         spin_lock(&adapter->stats_lock);
1662         s = t3_mac_update_stats(&pi->mac);
1663         spin_unlock(&adapter->stats_lock);
1664
1665         *data++ = s->tx_octets;
1666         *data++ = s->tx_frames;
1667         *data++ = s->tx_mcast_frames;
1668         *data++ = s->tx_bcast_frames;
1669         *data++ = s->tx_pause;
1670         *data++ = s->tx_underrun;
1671         *data++ = s->tx_fifo_urun;
1672
1673         *data++ = s->tx_frames_64;
1674         *data++ = s->tx_frames_65_127;
1675         *data++ = s->tx_frames_128_255;
1676         *data++ = s->tx_frames_256_511;
1677         *data++ = s->tx_frames_512_1023;
1678         *data++ = s->tx_frames_1024_1518;
1679         *data++ = s->tx_frames_1519_max;
1680
1681         *data++ = s->rx_octets;
1682         *data++ = s->rx_frames;
1683         *data++ = s->rx_mcast_frames;
1684         *data++ = s->rx_bcast_frames;
1685         *data++ = s->rx_pause;
1686         *data++ = s->rx_fcs_errs;
1687         *data++ = s->rx_symbol_errs;
1688         *data++ = s->rx_short;
1689         *data++ = s->rx_jabber;
1690         *data++ = s->rx_too_long;
1691         *data++ = s->rx_fifo_ovfl;
1692
1693         *data++ = s->rx_frames_64;
1694         *data++ = s->rx_frames_65_127;
1695         *data++ = s->rx_frames_128_255;
1696         *data++ = s->rx_frames_256_511;
1697         *data++ = s->rx_frames_512_1023;
1698         *data++ = s->rx_frames_1024_1518;
1699         *data++ = s->rx_frames_1519_max;
1700
1701         *data++ = pi->phy.fifo_errors;
1702
1703         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1704         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1705         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1706         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1707         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1708         *data++ = 0;    /* LroAggregated: unused, LRO superseded by GRO */
1709         *data++ = 0;    /* LroFlushed: unused */
1710         *data++ = 0;    /* LroNoDesc: unused */
1711         *data++ = s->rx_cong_drops;
1712
1713         *data++ = s->num_toggled;
1714         *data++ = s->num_resets;
1715
1716         *data++ = s->link_faults;
1717 }
1718
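/*
 * Read the adapter registers in the range [start, end] and store them
 * at the matching byte offsets in the caller's buffer.
 */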
1719 static inline void reg_block_dump(struct adapter *ap, void *buf,
1720                                   unsigned int start, unsigned int end)
1721 {
1722         u32 *p = buf + start;
1723
1724         for (; start <= end; start += sizeof(u32))
1725                 *p++ = t3_read_reg(ap, start);
1726 }
1727
1728 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1729                      void *buf)
1730 {
1731         struct port_info *pi = netdev_priv(dev);
1732         struct adapter *ap = pi->adapter;
1733
1734         /*
1735          * Version scheme:
1736          * bits 0..9: chip version
1737          * bits 10..15: chip revision
1738          * bit 31: set for PCIe cards
1739          */
1740         regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1741
1742         /*
1743          * We skip the MAC statistics registers because they are clear-on-read.
1744          * Also, reading multi-register stats would need synchronizing with
1745          * the periodic MAC stats accumulation.  Hard to justify the complexity.
1746          */
1747         memset(buf, 0, T3_REGMAP_SIZE);
1748         reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1749         reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1750         reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1751         reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1752         reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1753         reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1754                        XGM_REG(A_XGM_SERDES_STAT3, 1));
1755         reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1756                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1757 }
1758
1759 static int restart_autoneg(struct net_device *dev)
1760 {
1761         struct port_info *p = netdev_priv(dev);
1762
1763         if (!netif_running(dev))
1764                 return -EAGAIN;
1765         if (p->link_config.autoneg != AUTONEG_ENABLE)
1766                 return -EINVAL;
1767         p->phy.ops->autoneg_restart(&p->phy);
1768         return 0;
1769 }
1770
1771 static int set_phys_id(struct net_device *dev,
1772                        enum ethtool_phys_id_state state)
1773 {
1774         struct port_info *pi = netdev_priv(dev);
1775         struct adapter *adapter = pi->adapter;
1776
1777         switch (state) {
1778         case ETHTOOL_ID_ACTIVE:
1779                 return 1;       /* cycle on/off once per second */
1780
1781         case ETHTOOL_ID_OFF:
1782                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1783                 break;
1784
1785         case ETHTOOL_ID_ON:
1786         case ETHTOOL_ID_INACTIVE:
1787                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1788                          F_GPIO0_OUT_VAL);
1789         }
1790
1791         return 0;
1792 }
1793
1794 static int get_link_ksettings(struct net_device *dev,
1795                               struct ethtool_link_ksettings *cmd)
1796 {
1797         struct port_info *p = netdev_priv(dev);
1798         u32 supported;
1799
1800         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1801                                                 p->link_config.supported);
1802         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1803                                                 p->link_config.advertising);
1804
1805         if (netif_carrier_ok(dev)) {
1806                 cmd->base.speed = p->link_config.speed;
1807                 cmd->base.duplex = p->link_config.duplex;
1808         } else {
1809                 cmd->base.speed = SPEED_UNKNOWN;
1810                 cmd->base.duplex = DUPLEX_UNKNOWN;
1811         }
1812
1813         ethtool_convert_link_mode_to_legacy_u32(&supported,
1814                                                 cmd->link_modes.supported);
1815
1816         cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1817         cmd->base.phy_address = p->phy.mdio.prtad;
1818         cmd->base.autoneg = p->link_config.autoneg;
1819         return 0;
1820 }
1821
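/*
 * Translate a speed/duplex pair into the corresponding SUPPORTED_*
 * capability bit, or 0 if the combination is not supported.
 */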
1822 static int speed_duplex_to_caps(int speed, int duplex)
1823 {
1824         int cap = 0;
1825
1826         switch (speed) {
1827         case SPEED_10:
1828                 if (duplex == DUPLEX_FULL)
1829                         cap = SUPPORTED_10baseT_Full;
1830                 else
1831                         cap = SUPPORTED_10baseT_Half;
1832                 break;
1833         case SPEED_100:
1834                 if (duplex == DUPLEX_FULL)
1835                         cap = SUPPORTED_100baseT_Full;
1836                 else
1837                         cap = SUPPORTED_100baseT_Half;
1838                 break;
1839         case SPEED_1000:
1840                 if (duplex == DUPLEX_FULL)
1841                         cap = SUPPORTED_1000baseT_Full;
1842                 else
1843                         cap = SUPPORTED_1000baseT_Half;
1844                 break;
1845         case SPEED_10000:
1846                 if (duplex == DUPLEX_FULL)
1847                         cap = SUPPORTED_10000baseT_Full;
1848         }
1849         return cap;
1850 }
1851
1852 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1853                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1854                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1855                       ADVERTISED_10000baseT_Full)
1856
1857 static int set_link_ksettings(struct net_device *dev,
1858                               const struct ethtool_link_ksettings *cmd)
1859 {
1860         struct port_info *p = netdev_priv(dev);
1861         struct link_config *lc = &p->link_config;
1862         u32 advertising;
1863
1864         ethtool_convert_link_mode_to_legacy_u32(&advertising,
1865                                                 cmd->link_modes.advertising);
1866
1867         if (!(lc->supported & SUPPORTED_Autoneg)) {
1868                 /*
1869                  * PHY offers a single speed/duplex.  See if that's what's
1870                  * being requested.
1871                  */
1872                 if (cmd->base.autoneg == AUTONEG_DISABLE) {
1873                         u32 speed = cmd->base.speed;
1874                         int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1875                         if (lc->supported & cap)
1876                                 return 0;
1877                 }
1878                 return -EINVAL;
1879         }
1880
1881         if (cmd->base.autoneg == AUTONEG_DISABLE) {
1882                 u32 speed = cmd->base.speed;
1883                 int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1884
1885                 if (!(lc->supported & cap) || (speed == SPEED_1000))
1886                         return -EINVAL;
1887                 lc->requested_speed = speed;
1888                 lc->requested_duplex = cmd->base.duplex;
1889                 lc->advertising = 0;
1890         } else {
1891                 advertising &= ADVERTISED_MASK;
1892                 advertising &= lc->supported;
1893                 if (!advertising)
1894                         return -EINVAL;
1895                 lc->requested_speed = SPEED_INVALID;
1896                 lc->requested_duplex = DUPLEX_INVALID;
1897                 lc->advertising = advertising | ADVERTISED_Autoneg;
1898         }
1899         lc->autoneg = cmd->base.autoneg;
1900         if (netif_running(dev))
1901                 t3_link_start(&p->phy, &p->mac, lc);
1902         return 0;
1903 }
1904
1905 static void get_pauseparam(struct net_device *dev,
1906                            struct ethtool_pauseparam *epause)
1907 {
1908         struct port_info *p = netdev_priv(dev);
1909
1910         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1911         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1912         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1913 }
1914
1915 static int set_pauseparam(struct net_device *dev,
1916                           struct ethtool_pauseparam *epause)
1917 {
1918         struct port_info *p = netdev_priv(dev);
1919         struct link_config *lc = &p->link_config;
1920
1921         if (epause->autoneg == AUTONEG_DISABLE)
1922                 lc->requested_fc = 0;
1923         else if (lc->supported & SUPPORTED_Autoneg)
1924                 lc->requested_fc = PAUSE_AUTONEG;
1925         else
1926                 return -EINVAL;
1927
1928         if (epause->rx_pause)
1929                 lc->requested_fc |= PAUSE_RX;
1930         if (epause->tx_pause)
1931                 lc->requested_fc |= PAUSE_TX;
1932         if (lc->autoneg == AUTONEG_ENABLE) {
1933                 if (netif_running(dev))
1934                         t3_link_start(&p->phy, &p->mac, lc);
1935         } else {
1936                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1937                 if (netif_running(dev))
1938                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1939         }
1940         return 0;
1941 }
1942
1943 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
1944                           struct kernel_ethtool_ringparam *kernel_e,
1945                           struct netlink_ext_ack *extack)
1946 {
1947         struct port_info *pi = netdev_priv(dev);
1948         struct adapter *adapter = pi->adapter;
1949         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1950
1951         e->rx_max_pending = MAX_RX_BUFFERS;
1952         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1953         e->tx_max_pending = MAX_TXQ_ENTRIES;
1954
1955         e->rx_pending = q->fl_size;
1956         e->rx_mini_pending = q->rspq_size;
1957         e->rx_jumbo_pending = q->jumbo_size;
1958         e->tx_pending = q->txq_size[0];
1959 }
1960
1961 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
1962                          struct kernel_ethtool_ringparam *kernel_e,
1963                          struct netlink_ext_ack *extack)
1964 {
1965         struct port_info *pi = netdev_priv(dev);
1966         struct adapter *adapter = pi->adapter;
1967         struct qset_params *q;
1968         int i;
1969
1970         if (e->rx_pending > MAX_RX_BUFFERS ||
1971             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1972             e->tx_pending > MAX_TXQ_ENTRIES ||
1973             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1974             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1975             e->rx_pending < MIN_FL_ENTRIES ||
1976             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1977             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1978                 return -EINVAL;
1979
1980         if (adapter->flags & FULL_INIT_DONE)
1981                 return -EBUSY;
1982
1983         q = &adapter->params.sge.qset[pi->first_qset];
1984         for (i = 0; i < pi->nqsets; ++i, ++q) {
1985                 q->rspq_size = e->rx_mini_pending;
1986                 q->fl_size = e->rx_pending;
1987                 q->jumbo_size = e->rx_jumbo_pending;
1988                 q->txq_size[0] = e->tx_pending;
1989                 q->txq_size[1] = e->tx_pending;
1990                 q->txq_size[2] = e->tx_pending;
1991         }
1992         return 0;
1993 }
1994
1995 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
1996                         struct kernel_ethtool_coalesce *kernel_coal,
1997                         struct netlink_ext_ack *extack)
1998 {
1999         struct port_info *pi = netdev_priv(dev);
2000         struct adapter *adapter = pi->adapter;
2001         struct qset_params *qsp;
2002         struct sge_qset *qs;
2003         int i;
2004
2005         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
2006                 return -EINVAL;
2007
2008         for (i = 0; i < pi->nqsets; i++) {
2009                 qsp = &adapter->params.sge.qset[i];
2010                 qs = &adapter->sge.qs[i];
2011                 qsp->coalesce_usecs = c->rx_coalesce_usecs;
2012                 t3_update_qset_coalesce(qs, qsp);
2013         }
2014
2015         return 0;
2016 }
2017
2018 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
2019                         struct kernel_ethtool_coalesce *kernel_coal,
2020                         struct netlink_ext_ack *extack)
2021 {
2022         struct port_info *pi = netdev_priv(dev);
2023         struct adapter *adapter = pi->adapter;
2024         struct qset_params *q = adapter->params.sge.qset;
2025
2026         c->rx_coalesce_usecs = q->coalesce_usecs;
2027         return 0;
2028 }
2029
2030 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2031                       u8 *data)
2032 {
2033         struct port_info *pi = netdev_priv(dev);
2034         struct adapter *adapter = pi->adapter;
2035         int cnt;
2036
2037         e->magic = EEPROM_MAGIC;
2038         cnt = pci_read_vpd(adapter->pdev, e->offset, e->len, data);
2039         if (cnt < 0)
2040                 return cnt;
2041
2042         e->len = cnt;
2043
2044         return 0;
2045 }
2046
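/*
 * VPD accesses must be 4-byte aligned, so unaligned writes are turned
 * into a read-modify-write of an aligned bounce buffer.  Write
 * protection is lifted for the update and restored afterwards.
 */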
2047 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2048                       u8 *data)
2049 {
2050         struct port_info *pi = netdev_priv(dev);
2051         struct adapter *adapter = pi->adapter;
2052         u32 aligned_offset, aligned_len;
2053         u8 *buf;
2054         int err;
2055
2056         if (eeprom->magic != EEPROM_MAGIC)
2057                 return -EINVAL;
2058
2059         aligned_offset = eeprom->offset & ~3;
2060         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2061
2062         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2063                 buf = kmalloc(aligned_len, GFP_KERNEL);
2064                 if (!buf)
2065                         return -ENOMEM;
2066                 err = pci_read_vpd(adapter->pdev, aligned_offset, aligned_len,
2067                                    buf);
2068                 if (err < 0)
2069                         goto out;
2070                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2071         } else
2072                 buf = data;
2073
2074         err = t3_seeprom_wp(adapter, 0);
2075         if (err)
2076                 goto out;
2077
2078         err = pci_write_vpd(adapter->pdev, aligned_offset, aligned_len, buf);
2079         if (err >= 0)
2080                 err = t3_seeprom_wp(adapter, 1);
2081 out:
2082         if (buf != data)
2083                 kfree(buf);
2084         return err < 0 ? err : 0;
2085 }
2086
2087 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2088 {
2089         wol->supported = 0;
2090         wol->wolopts = 0;
2091         memset(&wol->sopass, 0, sizeof(wol->sopass));
2092 }
2093
2094 static const struct ethtool_ops cxgb_ethtool_ops = {
2095         .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
2096         .get_drvinfo = get_drvinfo,
2097         .get_msglevel = get_msglevel,
2098         .set_msglevel = set_msglevel,
2099         .get_ringparam = get_sge_param,
2100         .set_ringparam = set_sge_param,
2101         .get_coalesce = get_coalesce,
2102         .set_coalesce = set_coalesce,
2103         .get_eeprom_len = get_eeprom_len,
2104         .get_eeprom = get_eeprom,
2105         .set_eeprom = set_eeprom,
2106         .get_pauseparam = get_pauseparam,
2107         .set_pauseparam = set_pauseparam,
2108         .get_link = ethtool_op_get_link,
2109         .get_strings = get_strings,
2110         .set_phys_id = set_phys_id,
2111         .nway_reset = restart_autoneg,
2112         .get_sset_count = get_sset_count,
2113         .get_ethtool_stats = get_stats,
2114         .get_regs_len = get_regs_len,
2115         .get_regs = get_regs,
2116         .get_wol = get_wol,
2117         .get_link_ksettings = get_link_ksettings,
2118         .set_link_ksettings = set_link_ksettings,
2119 };
2120
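/*
 * Range-check an ioctl parameter.  Negative values mean "leave this
 * setting unchanged" and are always accepted.
 */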
2121 static int cxgb_in_range(int val, int lo, int hi)
2122 {
2123         return val < 0 || (val <= hi && val >= lo);
2124 }
2125
2126 static int cxgb_siocdevprivate(struct net_device *dev,
2127                                struct ifreq *ifreq,
2128                                void __user *useraddr,
2129                                int cmd)
2130 {
2131         struct port_info *pi = netdev_priv(dev);
2132         struct adapter *adapter = pi->adapter;
2133         int ret;
2134
2135         if (cmd != SIOCCHIOCTL)
2136                 return -EOPNOTSUPP;
2137
2138         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2139                 return -EFAULT;
2140
2141         switch (cmd) {
2142         case CHELSIO_SET_QSET_PARAMS:{
2143                 int i;
2144                 struct qset_params *q;
2145                 struct ch_qset_params t;
2146                 int q1 = pi->first_qset;
2147                 int nqsets = pi->nqsets;
2148
2149                 if (!capable(CAP_NET_ADMIN))
2150                         return -EPERM;
2151                 if (copy_from_user(&t, useraddr, sizeof(t)))
2152                         return -EFAULT;
2153                 if (t.cmd != CHELSIO_SET_QSET_PARAMS)
2154                         return -EINVAL;
2155                 if (t.qset_idx >= SGE_QSETS)
2156                         return -EINVAL;
2157                 if (!cxgb_in_range(t.intr_lat, 0, M_NEWTIMER) ||
2158                     !cxgb_in_range(t.cong_thres, 0, 255) ||
2159                     !cxgb_in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2160                               MAX_TXQ_ENTRIES) ||
2161                     !cxgb_in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2162                               MAX_TXQ_ENTRIES) ||
2163                     !cxgb_in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2164                               MAX_CTRL_TXQ_ENTRIES) ||
2165                     !cxgb_in_range(t.fl_size[0], MIN_FL_ENTRIES,
2166                               MAX_RX_BUFFERS) ||
2167                     !cxgb_in_range(t.fl_size[1], MIN_FL_ENTRIES,
2168                               MAX_RX_JUMBO_BUFFERS) ||
2169                     !cxgb_in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2170                               MAX_RSPQ_ENTRIES))
2171                         return -EINVAL;
2172
2173                 if ((adapter->flags & FULL_INIT_DONE) &&
2174                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2175                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2176                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2177                         t.polling >= 0 || t.cong_thres >= 0))
2178                         return -EBUSY;
2179
2180                 /* Allow setting of any available qset when offload enabled */
2181                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2182                         q1 = 0;
2183                         for_each_port(adapter, i) {
2184                                 pi = adap2pinfo(adapter, i);
2185                                 nqsets += pi->first_qset + pi->nqsets;
2186                         }
2187                 }
2188
2189                 if (t.qset_idx < q1)
2190                         return -EINVAL;
2191                 if (t.qset_idx > q1 + nqsets - 1)
2192                         return -EINVAL;
2193
2194                 q = &adapter->params.sge.qset[t.qset_idx];
2195
2196                 if (t.rspq_size >= 0)
2197                         q->rspq_size = t.rspq_size;
2198                 if (t.fl_size[0] >= 0)
2199                         q->fl_size = t.fl_size[0];
2200                 if (t.fl_size[1] >= 0)
2201                         q->jumbo_size = t.fl_size[1];
2202                 if (t.txq_size[0] >= 0)
2203                         q->txq_size[0] = t.txq_size[0];
2204                 if (t.txq_size[1] >= 0)
2205                         q->txq_size[1] = t.txq_size[1];
2206                 if (t.txq_size[2] >= 0)
2207                         q->txq_size[2] = t.txq_size[2];
2208                 if (t.cong_thres >= 0)
2209                         q->cong_thres = t.cong_thres;
2210                 if (t.intr_lat >= 0) {
2211                         struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
2213
2214                         q->coalesce_usecs = t.intr_lat;
2215                         t3_update_qset_coalesce(qs, q);
2216                 }
2217                 if (t.polling >= 0) {
2218                         if (adapter->flags & USING_MSIX)
2219                                 q->polling = t.polling;
2220                         else {
2221                                 /* No polling with INTx for T3A */
2222                                 if (adapter->params.rev == 0 &&
2223                                         !(adapter->flags & USING_MSI))
2224                                         t.polling = 0;
2225
2226                                 for (i = 0; i < SGE_QSETS; i++) {
2227                                         q = &adapter->params.sge.qset[i];
2229                                         q->polling = t.polling;
2230                                 }
2231                         }
2232                 }
2233
2234                 if (t.lro >= 0) {
2235                         if (t.lro)
2236                                 dev->wanted_features |= NETIF_F_GRO;
2237                         else
2238                                 dev->wanted_features &= ~NETIF_F_GRO;
2239                         netdev_update_features(dev);
2240                 }
2241
2242                 break;
2243         }
2244         case CHELSIO_GET_QSET_PARAMS:{
2245                 struct qset_params *q;
2246                 struct ch_qset_params t;
2247                 int q1 = pi->first_qset;
2248                 int nqsets = pi->nqsets;
2249                 int i;
2250
2251                 if (copy_from_user(&t, useraddr, sizeof(t)))
2252                         return -EFAULT;
2253
2254                 if (t.cmd != CHELSIO_GET_QSET_PARAMS)
2255                         return -EINVAL;
2256
2257                 /* Display qsets for all ports when offload enabled */
2258                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2259                         q1 = 0;
2260                         for_each_port(adapter, i) {
2261                                 pi = adap2pinfo(adapter, i);
2262                                 nqsets = pi->first_qset + pi->nqsets;
2263                         }
2264                 }
2265
2266                 if (t.qset_idx >= nqsets)
2267                         return -EINVAL;
2268                 t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
2269
2270                 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2271                 t.rspq_size = q->rspq_size;
2272                 t.txq_size[0] = q->txq_size[0];
2273                 t.txq_size[1] = q->txq_size[1];
2274                 t.txq_size[2] = q->txq_size[2];
2275                 t.fl_size[0] = q->fl_size;
2276                 t.fl_size[1] = q->jumbo_size;
2277                 t.polling = q->polling;
2278                 t.lro = !!(dev->features & NETIF_F_GRO);
2279                 t.intr_lat = q->coalesce_usecs;
2280                 t.cong_thres = q->cong_thres;
2281                 t.qnum = q1;
2282
2283                 if (adapter->flags & USING_MSIX)
2284                         t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2285                 else
2286                         t.vector = adapter->pdev->irq;
2287
2288                 if (copy_to_user(useraddr, &t, sizeof(t)))
2289                         return -EFAULT;
2290                 break;
2291         }
2292         case CHELSIO_SET_QSET_NUM:{
2293                 struct ch_reg edata;
2294                 unsigned int i, first_qset = 0, other_qsets = 0;
2295
2296                 if (!capable(CAP_NET_ADMIN))
2297                         return -EPERM;
2298                 if (adapter->flags & FULL_INIT_DONE)
2299                         return -EBUSY;
2300                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2301                         return -EFAULT;
2302                 if (edata.cmd != CHELSIO_SET_QSET_NUM)
2303                         return -EINVAL;
2304                 if (edata.val < 1 ||
2305                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2306                         return -EINVAL;
2307
2308                 for_each_port(adapter, i)
2309                         if (adapter->port[i] && adapter->port[i] != dev)
2310                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
2311
2312                 if (edata.val + other_qsets > SGE_QSETS)
2313                         return -EINVAL;
2314
2315                 pi->nqsets = edata.val;
2316
2317                 for_each_port(adapter, i)
2318                         if (adapter->port[i]) {
2319                                 pi = adap2pinfo(adapter, i);
2320                                 pi->first_qset = first_qset;
2321                                 first_qset += pi->nqsets;
2322                         }
2323                 break;
2324         }
2325         case CHELSIO_GET_QSET_NUM:{
2326                 struct ch_reg edata;
2327
2328                 memset(&edata, 0, sizeof(struct ch_reg));
2329
2330                 edata.cmd = CHELSIO_GET_QSET_NUM;
2331                 edata.val = pi->nqsets;
2332                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2333                         return -EFAULT;
2334                 break;
2335         }
2336         case CHELSIO_LOAD_FW:{
2337                 u8 *fw_data;
2338                 struct ch_mem_range t;
2339
2340                 if (!capable(CAP_SYS_RAWIO))
2341                         return -EPERM;
2342                 if (copy_from_user(&t, useraddr, sizeof(t)))
2343                         return -EFAULT;
2344                 if (t.cmd != CHELSIO_LOAD_FW)
2345                         return -EINVAL;
2346                 /* Should t.len be sanity-checked here? */
2347                 fw_data = memdup_user(useraddr + sizeof(t), t.len);
2348                 if (IS_ERR(fw_data))
2349                         return PTR_ERR(fw_data);
2350
2351                 ret = t3_load_fw(adapter, fw_data, t.len);
2352                 kfree(fw_data);
2353                 if (ret)
2354                         return ret;
2355                 break;
2356         }
2357         case CHELSIO_SETMTUTAB:{
2358                 struct ch_mtus m;
2359                 int i;
2360
2361                 if (!is_offload(adapter))
2362                         return -EOPNOTSUPP;
2363                 if (!capable(CAP_NET_ADMIN))
2364                         return -EPERM;
2365                 if (offload_running(adapter))
2366                         return -EBUSY;
2367                 if (copy_from_user(&m, useraddr, sizeof(m)))
2368                         return -EFAULT;
2369                 if (m.cmd != CHELSIO_SETMTUTAB)
2370                         return -EINVAL;
2371                 if (m.nmtus != NMTUS)
2372                         return -EINVAL;
2373                 if (m.mtus[0] < 81)     /* accommodate SACK */
2374                         return -EINVAL;
2375
2376                 /* MTUs must be in ascending order */
2377                 for (i = 1; i < NMTUS; ++i)
2378                         if (m.mtus[i] < m.mtus[i - 1])
2379                                 return -EINVAL;
2380
2381                 memcpy(adapter->params.mtus, m.mtus,
2382                         sizeof(adapter->params.mtus));
2383                 break;
2384         }
2385         case CHELSIO_GET_PM:{
2386                 struct tp_params *p = &adapter->params.tp;
2387                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2388
2389                 if (!is_offload(adapter))
2390                         return -EOPNOTSUPP;
2391                 m.tx_pg_sz = p->tx_pg_size;
2392                 m.tx_num_pg = p->tx_num_pgs;
2393                 m.rx_pg_sz = p->rx_pg_size;
2394                 m.rx_num_pg = p->rx_num_pgs;
2395                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2396                 if (copy_to_user(useraddr, &m, sizeof(m)))
2397                         return -EFAULT;
2398                 break;
2399         }
2400         case CHELSIO_SET_PM:{
2401                 struct ch_pm m;
2402                 struct tp_params *p = &adapter->params.tp;
2403
2404                 if (!is_offload(adapter))
2405                         return -EOPNOTSUPP;
2406                 if (!capable(CAP_NET_ADMIN))
2407                         return -EPERM;
2408                 if (adapter->flags & FULL_INIT_DONE)
2409                         return -EBUSY;
2410                 if (copy_from_user(&m, useraddr, sizeof(m)))
2411                         return -EFAULT;
2412                 if (m.cmd != CHELSIO_SET_PM)
2413                         return -EINVAL;
2414                 if (!is_power_of_2(m.rx_pg_sz) ||
2415                         !is_power_of_2(m.tx_pg_sz))
2416                         return -EINVAL; /* not power of 2 */
2417                 if (!(m.rx_pg_sz & 0x14000))
2418                         return -EINVAL; /* not 16KB or 64KB */
2419                 if (!(m.tx_pg_sz & 0x1554000))
2420                         return -EINVAL; /* not a power of 4 from 16KB to 16MB */
2421                 if (m.tx_num_pg == -1)
2422                         m.tx_num_pg = p->tx_num_pgs;
2423                 if (m.rx_num_pg == -1)
2424                         m.rx_num_pg = p->rx_num_pgs;
2425                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2426                         return -EINVAL;
2427                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2428                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2429                         return -EINVAL;
2430                 p->rx_pg_size = m.rx_pg_sz;
2431                 p->tx_pg_size = m.tx_pg_sz;
2432                 p->rx_num_pgs = m.rx_num_pg;
2433                 p->tx_num_pgs = m.tx_num_pg;
2434                 break;
2435         }
2436         case CHELSIO_GET_MEM:{
2437                 struct ch_mem_range t;
2438                 struct mc7 *mem;
2439                 u64 buf[32];
2440
2441                 if (!is_offload(adapter))
2442                         return -EOPNOTSUPP;
2443                 if (!capable(CAP_NET_ADMIN))
2444                         return -EPERM;
2445                 if (!(adapter->flags & FULL_INIT_DONE))
2446                         return -EIO;    /* need the memory controllers */
2447                 if (copy_from_user(&t, useraddr, sizeof(t)))
2448                         return -EFAULT;
2449                 if (t.cmd != CHELSIO_GET_MEM)
2450                         return -EINVAL;
2451                 if ((t.addr & 7) || (t.len & 7))
2452                         return -EINVAL;
2453                 if (t.mem_id == MEM_CM)
2454                         mem = &adapter->cm;
2455                 else if (t.mem_id == MEM_PMRX)
2456                         mem = &adapter->pmrx;
2457                 else if (t.mem_id == MEM_PMTX)
2458                         mem = &adapter->pmtx;
2459                 else
2460                         return -EINVAL;
2461
2462                 /*
2463                  * Version scheme:
2464                  * bits 0..9: chip version
2465                  * bits 10..15: chip revision
2466                  */
2467                 t.version = 3 | (adapter->params.rev << 10);
2468                 if (copy_to_user(useraddr, &t, sizeof(t)))
2469                         return -EFAULT;
2470
2471                 /*
2472                  * Read 256 bytes at a time as t.len can be large and we don't
2473                  * want to use huge intermediate buffers.
2474                  */
2475                 useraddr += sizeof(t);  /* advance to start of buffer */
2476                 while (t.len) {
2477                         unsigned int chunk =
2478                                 min_t(unsigned int, t.len, sizeof(buf));
2479
2480                         ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2481                                              buf);
2483                         if (ret)
2484                                 return ret;
2485                         if (copy_to_user(useraddr, buf, chunk))
2486                                 return -EFAULT;
2487                         useraddr += chunk;
2488                         t.addr += chunk;
2489                         t.len -= chunk;
2490                 }
2491                 break;
2492         }
2493         case CHELSIO_SET_TRACE_FILTER:{
2494                 struct ch_trace t;
2495                 const struct trace_params *tp;
2496
2497                 if (!capable(CAP_NET_ADMIN))
2498                         return -EPERM;
2499                 if (!offload_running(adapter))
2500                         return -EAGAIN;
2501                 if (copy_from_user(&t, useraddr, sizeof(t)))
2502                         return -EFAULT;
2503                 if (t.cmd != CHELSIO_SET_TRACE_FILTER)
2504                         return -EINVAL;
2505
2506                 tp = (const struct trace_params *)&t.sip;
2507                 if (t.config_tx)
2508                         t3_config_trace_filter(adapter, tp, 0,
2509                                                 t.invert_match,
2510                                                 t.trace_tx);
2511                 if (t.config_rx)
2512                         t3_config_trace_filter(adapter, tp, 1,
2513                                                 t.invert_match,
2514                                                 t.trace_rx);
2515                 break;
2516         }
2517         default:
2518                 return -EOPNOTSUPP;
2519         }
2520         return 0;
2521 }
2522
2523 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2524 {
2525         struct mii_ioctl_data *data = if_mii(req);
2526         struct port_info *pi = netdev_priv(dev);
2527         struct adapter *adapter = pi->adapter;
2528
2529         switch (cmd) {
2530         case SIOCGMIIREG:
2531         case SIOCSMIIREG:
2532                 /* Convert phy_id from older PRTAD/DEVAD format */
2533                 if (is_10G(adapter) &&
2534                     !mdio_phy_id_is_c45(data->phy_id) &&
2535                     (data->phy_id & 0x1f00) &&
2536                     !(data->phy_id & 0xe0e0))
2537                         data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2538                                                        data->phy_id & 0x1f);
2539                 fallthrough;
2540         case SIOCGMIIPHY:
2541                 return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2542         default:
2543                 return -EOPNOTSUPP;
2544         }
2545 }
2546
2547 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2548 {
2549         struct port_info *pi = netdev_priv(dev);
2550         struct adapter *adapter = pi->adapter;
2551         int ret;
2552
2553         ret = t3_mac_set_mtu(&pi->mac, new_mtu);
             if (ret)
2554                 return ret;
2555         dev->mtu = new_mtu;
2556         init_port_mtus(adapter);
2557         if (adapter->params.rev == 0 && offload_running(adapter))
2558                 t3_load_mtus(adapter, adapter->params.mtus,
2559                              adapter->params.a_wnd, adapter->params.b_wnd,
2560                              adapter->port[0]->mtu);
2561         return 0;
2562 }
2563
2564 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2565 {
2566         struct port_info *pi = netdev_priv(dev);
2567         struct adapter *adapter = pi->adapter;
2568         struct sockaddr *addr = p;
2569
2570         if (!is_valid_ether_addr(addr->sa_data))
2571                 return -EADDRNOTAVAIL;
2572
2573         eth_hw_addr_set(dev, addr->sa_data);
2574         t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2575         if (offload_running(adapter))
2576                 write_smt_entry(adapter, pi->port_id);
2577         return 0;
2578 }
2579
2580 static netdev_features_t cxgb_fix_features(struct net_device *dev,
2581         netdev_features_t features)
2582 {
2583         /*
2584          * Since there is no support for separate rx/tx vlan accel
2585          * enable/disable make sure tx flag is always in same state as rx.
2586          */
2587         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2588                 features |= NETIF_F_HW_VLAN_CTAG_TX;
2589         else
2590                 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2591
2592         return features;
2593 }
2594
2595 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2596 {
2597         netdev_features_t changed = dev->features ^ features;
2598
2599         if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2600                 cxgb_vlan_mode(dev, features);
2601
2602         return 0;
2603 }
2604
2605 #ifdef CONFIG_NET_POLL_CONTROLLER
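/*
 * Poll all of the port's queue sets by calling the interrupt handler
 * directly, for contexts (e.g. netconsole) where interrupts may be
 * unavailable.
 */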
2606 static void cxgb_netpoll(struct net_device *dev)
2607 {
2608         struct port_info *pi = netdev_priv(dev);
2609         struct adapter *adapter = pi->adapter;
2610         int qidx;
2611
2612         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2613                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2614                 void *source;
2615
2616                 if (adapter->flags & USING_MSIX)
2617                         source = qs;
2618                 else
2619                         source = adapter;
2620
2621                 t3_intr_handler(adapter, qs->rspq.polling)(0, source);
2622         }
2623 }
2624 #endif
2625
2626 /*
2627  * Periodic accumulation of MAC statistics.
2628  */
2629 static void mac_stats_update(struct adapter *adapter)
2630 {
2631         int i;
2632
2633         for_each_port(adapter, i) {
2634                 struct net_device *dev = adapter->port[i];
2635                 struct port_info *p = netdev_priv(dev);
2636
2637                 if (netif_running(dev)) {
2638                         spin_lock(&adapter->stats_lock);
2639                         t3_mac_update_stats(&p->mac);
2640                         spin_unlock(&adapter->stats_lock);
2641                 }
2642         }
2643 }
2644
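/*
 * Handle any pending link faults and poll the link state of PHYs that
 * cannot signal link changes through an interrupt.
 */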
2645 static void check_link_status(struct adapter *adapter)
2646 {
2647         int i;
2648
2649         for_each_port(adapter, i) {
2650                 struct net_device *dev = adapter->port[i];
2651                 struct port_info *p = netdev_priv(dev);
2652                 int link_fault;
2653
2654                 spin_lock_irq(&adapter->work_lock);
2655                 link_fault = p->link_fault;
2656                 spin_unlock_irq(&adapter->work_lock);
2657
2658                 if (link_fault) {
2659                         t3_link_fault(adapter, i);
2660                         continue;
2661                 }
2662
2663                 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2664                         t3_xgm_intr_disable(adapter, i);
2665                         t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2666
2667                         t3_link_changed(adapter, i);
2668                         t3_xgm_intr_enable(adapter, i);
2669                 }
2670         }
2671 }
2672
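/*
 * T3B2 MAC watchdog: depending on what the watchdog reports for each
 * running port, either count a TX-enable toggle or fully reprogram and
 * restart the MAC.
 */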
2673 static void check_t3b2_mac(struct adapter *adapter)
2674 {
2675         int i;
2676
2677         if (!rtnl_trylock())    /* synchronize with ifdown */
2678                 return;
2679
2680         for_each_port(adapter, i) {
2681                 struct net_device *dev = adapter->port[i];
2682                 struct port_info *p = netdev_priv(dev);
2683                 int status;
2684
2685                 if (!netif_running(dev))
2686                         continue;
2687
2688                 status = 0;
2689                 if (netif_running(dev) && netif_carrier_ok(dev))
2690                         status = t3b2_mac_watchdog_task(&p->mac);
2691                 if (status == 1)
2692                         p->mac.stats.num_toggled++;
2693                 else if (status == 2) {
2694                         struct cmac *mac = &p->mac;
2695
2696                         t3_mac_set_mtu(mac, dev->mtu);
2697                         t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2698                         cxgb_set_rxmode(dev);
2699                         t3_link_start(&p->phy, mac, &p->link_config);
2700                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2701                         t3_port_intr_enable(adapter, p->port_id);
2702                         p->mac.stats.num_resets++;
2703                 }
2704         }
2705         rtnl_unlock();
2706 }
2707
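/*
 * Periodic maintenance task: polls link state, accumulates MAC stats,
 * runs the T3B2 MAC watchdog, and counts conditions (RX FIFO overflow,
 * empty free lists) that are polled rather than interrupt-driven.
 */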
2709 static void t3_adap_check_task(struct work_struct *work)
2710 {
2711         struct adapter *adapter = container_of(work, struct adapter,
2712                                                adap_check_task.work);
2713         const struct adapter_params *p = &adapter->params;
2714         int port;
2715         unsigned int v, status, reset;
2716
2717         adapter->check_task_cnt++;
2718
2719         check_link_status(adapter);
2720
2721         /* Accumulate MAC stats if needed */
2722         if (!p->linkpoll_period ||
2723             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2724             p->stats_update_period) {
2725                 mac_stats_update(adapter);
2726                 adapter->check_task_cnt = 0;
2727         }
2728
2729         if (p->rev == T3_REV_B2)
2730                 check_t3b2_mac(adapter);
2731
2732         /*
2733          * Scan the XGMACs to check for various conditions which we want to
2734          * monitor in a periodic polling manner rather than via an interrupt
2735          * condition.  This is used for conditions which would otherwise flood
2736          * the system with interrupts, and where we only really need to know
2737          * that the conditions are "happening" ...  For each condition we count
2738          * its occurrences and clear the cause bit for the next polling loop.
2739          */
2740         for_each_port(adapter, port) {
2741                 struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2742                 u32 cause;
2743
2744                 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2745                 reset = 0;
2746                 if (cause & F_RXFIFO_OVERFLOW) {
2747                         mac->stats.rx_fifo_ovfl++;
2748                         reset |= F_RXFIFO_OVERFLOW;
2749                 }
2750
2751                 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2752         }
2753
2754         /*
2755          * We do the same as above for FL_EMPTY interrupts.
2756          */
2757         status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2758         reset = 0;
2759
2760         if (status & F_FLEMPTY) {
2761                 struct sge_qset *qs = &adapter->sge.qs[0];
2762                 int i = 0;
2763
2764                 reset |= F_FLEMPTY;
2765
2766                 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2767                     0xffff;
2768
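                /*
                 * The status bits alternate between the two free lists
                 * of successive queue sets: credit fl[0] and fl[1] of
                 * each qset in turn.
                 */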
2769                 while (v) {
2770                         qs->fl[i].empty += (v & 1);
2771                         if (i)
2772                                 qs++;
2773                         i ^= 1;
2774                         v >>= 1;
2775                 }
2776         }
2777
2778         t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2779
2780         /* Schedule the next check update if any port is active. */
2781         spin_lock_irq(&adapter->work_lock);
2782         if (adapter->open_device_map & PORT_MASK)
2783                 schedule_chk_task(adapter);
2784         spin_unlock_irq(&adapter->work_lock);
2785 }
2786
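/*
 * Doorbell state changes are reported to the offload driver as events;
 * after a doorbell drop we also re-ring the driver's own queue-set
 * doorbells.
 */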
2787 static void db_full_task(struct work_struct *work)
2788 {
2789         struct adapter *adapter = container_of(work, struct adapter,
2790                                                db_full_task);
2791
2792         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2793 }
2794
2795 static void db_empty_task(struct work_struct *work)
2796 {
2797         struct adapter *adapter = container_of(work, struct adapter,
2798                                                db_empty_task);
2799
2800         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2801 }
2802
2803 static void db_drop_task(struct work_struct *work)
2804 {
2805         struct adapter *adapter = container_of(work, struct adapter,
2806                                                db_drop_task);
2807         unsigned long delay = 1000;
2808         unsigned short r;
2809
2810         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2811
2812         /*
2813          * Sleep a while before ringing the driver qset doorbells.
2814          * The delay is between 1000 and 2023 usecs.
2815          */
2816         get_random_bytes(&r, 2);
2817         delay += r & 1023;
2818         set_current_state(TASK_UNINTERRUPTIBLE);
2819         schedule_timeout(usecs_to_jiffies(delay));
2820         ring_dbs(adapter);
2821 }
2822
2823 /*
2824  * Processes external (PHY) interrupts in process context.
2825  */
2826 static void ext_intr_task(struct work_struct *work)
2827 {
2828         struct adapter *adapter = container_of(work, struct adapter,
2829                                                ext_intr_handler_task);
2830         int i;
2831
2832         /* Disable link fault interrupts */
2833         for_each_port(adapter, i) {
2834                 struct net_device *dev = adapter->port[i];
2835                 struct port_info *p = netdev_priv(dev);
2836
2837                 t3_xgm_intr_disable(adapter, i);
2838                 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2839         }
2840
2841         /* Re-enable link fault interrupts */
2842         t3_phy_intr_handler(adapter);
2843
2844         for_each_port(adapter, i)
2845                 t3_xgm_intr_enable(adapter, i);
2846
2847         /* Now reenable external interrupts */
2848         spin_lock_irq(&adapter->work_lock);
2849         if (adapter->slow_intr_mask) {
2850                 adapter->slow_intr_mask |= F_T3DBG;
2851                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2852                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2853                              adapter->slow_intr_mask);
2854         }
2855         spin_unlock_irq(&adapter->work_lock);
2856 }
2857
2858 /*
2859  * Interrupt-context handler for external (PHY) interrupts.
2860  */
2861 void t3_os_ext_intr_handler(struct adapter *adapter)
2862 {
2863         /*
2864          * Schedule a task to handle external interrupts as they may be slow
2865          * and we use a mutex to protect MDIO registers.  We disable PHY
2866          * interrupts in the meantime and let the task reenable them when
2867          * it's done.
2868          */
2869         spin_lock(&adapter->work_lock);
2870         if (adapter->slow_intr_mask) {
2871                 adapter->slow_intr_mask &= ~F_T3DBG;
2872                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2873                              adapter->slow_intr_mask);
2874                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2875         }
2876         spin_unlock(&adapter->work_lock);
2877 }
2878
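/*
 * Record a link fault for the port; the periodic check task will act
 * on it.
 */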
2879 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2880 {
2881         struct net_device *netdev = adapter->port[port_id];
2882         struct port_info *pi = netdev_priv(netdev);
2883
2884         spin_lock(&adapter->work_lock);
2885         pi->link_fault = 1;
2886         spin_unlock(&adapter->work_lock);
2887 }
2888
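/*
 * Take the adapter down after an error: notify and close the offload
 * side, stop all ports and the SGE timers, and optionally reset the
 * chip before disabling the PCI device.
 */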
2889 static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2890 {
2891         int i, ret = 0;
2892
2893         if (is_offload(adapter) &&
2894             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2895                 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2896                 offload_close(&adapter->tdev);
2897         }
2898
2899         /* Stop all ports */
2900         for_each_port(adapter, i) {
2901                 struct net_device *netdev = adapter->port[i];
2902
2903                 if (netif_running(netdev))
2904                         __cxgb_close(netdev, on_wq);
2905         }
2906
2907         /* Stop SGE timers */
2908         t3_stop_sge_timers(adapter);
2909
2910         adapter->flags &= ~FULL_INIT_DONE;
2911
2912         if (reset)
2913                 ret = t3_reset_adapter(adapter);
2914
2915         pci_disable_device(adapter->pdev);
2916
2917         return ret;
2918 }
2919
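/*
 * Re-enable the PCI device after an error or reset and prepare the
 * adapter so that its configuration can be replayed.
 */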
2920 static int t3_reenable_adapter(struct adapter *adapter)
2921 {
2922         if (pci_enable_device(adapter->pdev)) {
2923                 dev_err(&adapter->pdev->dev,
2924                         "Cannot re-enable PCI device after reset.\n");
2925                 goto err;
2926         }
2927         pci_set_master(adapter->pdev);
2928         pci_restore_state(adapter->pdev);
2929         pci_save_state(adapter->pdev);
2930
2931         /* Free sge resources */
2932         t3_free_sge_resources(adapter);
2933
2934         if (t3_replay_prep_adapter(adapter))
2935                 goto err;
2936
2937         return 0;
2938 err:
2939         return -1;
2940 }
2941
2942 static void t3_resume_ports(struct adapter *adapter)
2943 {
2944         int i;
2945
2946         /* Restart the ports */
2947         for_each_port(adapter, i) {
2948                 struct net_device *netdev = adapter->port[i];
2949
2950                 if (netif_running(netdev)) {
2951                         if (cxgb_open(netdev)) {
2952                                 dev_err(&adapter->pdev->dev,
2953                                         "can't bring device back up after reset\n");
2955                                 continue;
2956                         }
2957                 }
2958         }
2959
2960         if (is_offload(adapter) && !ofld_disable)
2961                 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2962 }
2963
/*
 * Process a fatal error:
 * bring the ports down, reset the chip, then bring the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
        struct adapter *adapter = container_of(work, struct adapter,
                                               fatal_error_handler_task);
        int err = 0;

        rtnl_lock();
        err = t3_adapter_error(adapter, 1, 1);
        if (!err)
                err = t3_reenable_adapter(adapter);
        if (!err)
                t3_resume_ports(adapter);

        CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
        rtnl_unlock();
}

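/*
 * Handle a fatal hardware error: stop SGE DMA, silence both MACs, disable
 * interrupts, and schedule fatal_error_task to reset and restart the
 * adapter.  The firmware status words are logged to aid diagnosis.
 */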
void t3_fatal_err(struct adapter *adapter)
{
        unsigned int fw_status[4];

        if (adapter->flags & FULL_INIT_DONE) {
                t3_sge_stop_dma(adapter);
                t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
                t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
                t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
                t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

                spin_lock(&adapter->work_lock);
                t3_intr_disable(adapter);
                queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
                spin_unlock(&adapter->work_lock);
        }
        CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
        if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
                CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
                         fw_status[0], fw_status[1],
                         fw_status[2], fw_status[3]);
}

/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
                                             pci_channel_state_t state)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        t3_adapter_error(adapter, 0, 0);

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * t3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        if (!t3_reenable_adapter(adapter))
                return PCI_ERS_RESULT_RECOVERED;

        return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
                 t3_read_reg(adapter, A_PCIE_PEX_ERR));

        rtnl_lock();
        t3_resume_ports(adapter);
        rtnl_unlock();
}

static const struct pci_error_handlers t3_err_handler = {
        .error_detected = t3_io_error_detected,
        .slot_reset = t3_io_slot_reset,
        .resume = t3_io_resume,
};

/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in HW.
 */
static void set_nqsets(struct adapter *adap)
{
        int i, j = 0;
        int num_cpus = netif_get_num_default_rss_queues();
        int hwports = adap->params.nports;
        int nqsets = adap->msix_nvectors - 1;

        if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
                if (hwports == 2 &&
                    (hwports * nqsets > SGE_QSETS ||
                     num_cpus >= nqsets / hwports))
                        nqsets /= hwports;
                if (nqsets > num_cpus)
                        nqsets = num_cpus;
                if (nqsets < 1 || hwports == 4)
                        nqsets = 1;
        } else {
                nqsets = 1;
        }

        for_each_port(adap, i) {
                struct port_info *pi = adap2pinfo(adap, i);

                pi->first_qset = j;
                pi->nqsets = nqsets;
                j = pi->first_qset + nqsets;

                dev_info(&adap->pdev->dev,
                         "Port %d using %d queue sets.\n", i, nqsets);
        }
}

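/*
 * Try to allocate one MSI-X vector per queue set plus one for the slow
 * path, accepting as few as one per port plus one.  Returns 0 on success
 * or the negative error from pci_enable_msix_range().
 */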
static int cxgb_enable_msix(struct adapter *adap)
{
        struct msix_entry entries[SGE_QSETS + 1];
        int vectors;
        int i;

        vectors = ARRAY_SIZE(entries);
        for (i = 0; i < vectors; ++i)
                entries[i].entry = i;

        vectors = pci_enable_msix_range(adap->pdev, entries,
                                        adap->params.nports + 1, vectors);
        if (vectors < 0)
                return vectors;

        for (i = 0; i < vectors; ++i)
                adap->msix_info[i].vec = entries[i].vector;
        adap->msix_nvectors = vectors;

        return 0;
}

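/*
 * Print a one-line summary for each registered port (board, PHY, bus type,
 * interrupt mode), plus the adapter's memory sizes and serial number.
 */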
static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
{
        static const char *pci_variant[] = {
                "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
        };

        int i;
        char buf[80];

        if (is_pcie(adap))
                snprintf(buf, sizeof(buf), "%s x%d",
                         pci_variant[adap->params.pci.variant],
                         adap->params.pci.width);
        else
                snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
                         pci_variant[adap->params.pci.variant],
                         adap->params.pci.speed, adap->params.pci.width);

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                if (!test_bit(i, &adap->registered_device_map))
                        continue;
                netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
                            ai->desc, pi->phy.desc,
                            is_offload(adap) ? "R" : "", adap->params.rev, buf,
                            (adap->flags & USING_MSIX) ? " MSI-X" :
                            (adap->flags & USING_MSI) ? " MSI" : "");
                if (adap->name == dev->name && adap->params.vpd.mclk)
                        pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
                                adap->name, t3_mc7_size(&adap->cm) >> 20,
                                t3_mc7_size(&adap->pmtx) >> 20,
                                t3_mc7_size(&adap->pmrx) >> 20,
                                adap->params.vpd.sn);
        }
}

static const struct net_device_ops cxgb_netdev_ops = {
        .ndo_open               = cxgb_open,
        .ndo_stop               = cxgb_close,
        .ndo_start_xmit         = t3_eth_xmit,
        .ndo_get_stats          = cxgb_get_stats,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = cxgb_set_rxmode,
        .ndo_eth_ioctl          = cxgb_ioctl,
        .ndo_siocdevprivate     = cxgb_siocdevprivate,
        .ndo_change_mtu         = cxgb_change_mtu,
        .ndo_set_mac_address    = cxgb_set_mac_addr,
        .ndo_fix_features       = cxgb_fix_features,
        .ndo_set_features       = cxgb_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cxgb_netpoll,
#endif
};

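/*
 * Derive the port's iSCSI MAC address from its Ethernet address by setting
 * the high bit of octet 3, guaranteeing the two addresses differ.
 */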
static void cxgb3_init_iscsi_mac(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);

        memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
        pi->iscsic.mac_addr[3] |= 0x80;
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
                        NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
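
/*
 * PCI probe routine: create the shared work queue on first use, map the
 * device, allocate the adapter and one net device per port, register the
 * ports, pick MSI-X/MSI/INTx, and create the sysfs attribute group.
 */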
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int i, err;
        resource_size_t mmio_start, mmio_len;
        const struct adapter_info *ai;
        struct adapter *adapter = NULL;
        struct port_info *pi;

        if (!cxgb3_wq) {
                cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
                if (!cxgb3_wq) {
                        pr_err("cannot initialize work queue\n");
                        return -ENOMEM;
                }
        }

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "cannot enable PCI device\n");
                goto out;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                /* Just info, some other driver may have claimed the device. */
                dev_info(&pdev->dev, "cannot obtain PCI resources\n");
                goto out_disable_device;
        }

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                dev_err(&pdev->dev, "no usable DMA configuration\n");
                goto out_release_regions;
        }

        pci_set_master(pdev);
        pci_save_state(pdev);

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);
        ai = t3_get_adapter_info(ent->driver_data);

        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
        if (!adapter) {
                err = -ENOMEM;
                goto out_release_regions;
        }

        adapter->nofail_skb =
                alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
        if (!adapter->nofail_skb) {
                dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
                err = -ENOMEM;
                goto out_free_adapter;
        }

        adapter->regs = ioremap(mmio_start, mmio_len);
        if (!adapter->regs) {
                dev_err(&pdev->dev, "cannot map device registers\n");
                err = -ENOMEM;
                goto out_free_adapter_nofail;
        }

        adapter->pdev = pdev;
        adapter->name = pci_name(pdev);
        adapter->msg_enable = dflt_msg_enable;
        adapter->mmio_len = mmio_len;

        mutex_init(&adapter->mdio_lock);
        spin_lock_init(&adapter->work_lock);
        spin_lock_init(&adapter->stats_lock);

        INIT_LIST_HEAD(&adapter->adapter_list);
        INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
        INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);

        INIT_WORK(&adapter->db_full_task, db_full_task);
        INIT_WORK(&adapter->db_empty_task, db_empty_task);
        INIT_WORK(&adapter->db_drop_task, db_drop_task);

        INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

        for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
                struct net_device *netdev;

                netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_NETDEV_DEV(netdev, &pdev->dev);

                adapter->port[i] = netdev;
                pi = netdev_priv(netdev);
                pi->adapter = adapter;
                pi->port_id = i;
                netif_carrier_off(netdev);
                netdev->irq = pdev->irq;
                netdev->mem_start = mmio_start;
                netdev->mem_end = mmio_start + mmio_len - 1;
                netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
                        NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
                netdev->features |= netdev->hw_features |
                                    NETIF_F_HW_VLAN_CTAG_TX;
                netdev->vlan_features |= netdev->features & VLAN_FEAT;

                netdev->features |= NETIF_F_HIGHDMA;

                netdev->netdev_ops = &cxgb_netdev_ops;
                netdev->ethtool_ops = &cxgb_ethtool_ops;
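                /* the 81-byte minimum MTU accommodates TCP SACK options */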
                netdev->min_mtu = 81;
                netdev->max_mtu = ETH_MAX_MTU;
                netdev->dev_port = pi->port_id;
        }

        pci_set_drvdata(pdev, adapter);
        if (t3_prep_adapter(adapter, ai, 1) < 0) {
                err = -ENODEV;
                goto out_free_dev;
        }

        /*
         * The card is now ready to go.  If any errors occur during device
         * registration we do not fail the whole card but rather proceed only
         * with the ports we manage to register successfully.  However we must
         * register at least one net device.
         */
        for_each_port(adapter, i) {
                err = register_netdev(adapter->port[i]);
                if (err) {
                        dev_warn(&pdev->dev,
                                 "cannot register net device %s, skipping\n",
                                 adapter->port[i]->name);
                } else {
                        /*
                         * Change the name we use for messages to the name of
                         * the first successfully registered interface.
                         */
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i]->name;

                        __set_bit(i, &adapter->registered_device_map);
                }
        }
        if (!adapter->registered_device_map) {
                dev_err(&pdev->dev, "could not register any net devices\n");
                err = -ENODEV;
                goto out_free_dev;
        }

        for_each_port(adapter, i)
                cxgb3_init_iscsi_mac(adapter->port[i]);

        /* Driver's ready. Reflect it on LEDs */
        t3_led_ready(adapter);

        if (is_offload(adapter)) {
                __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
                cxgb3_adapter_ofld(adapter);
        }

        /* See what interrupts we'll be using */
        if (msi > 1 && cxgb_enable_msix(adapter) == 0)
                adapter->flags |= USING_MSIX;
        else if (msi > 0 && pci_enable_msi(pdev) == 0)
                adapter->flags |= USING_MSI;

        set_nqsets(adapter);

        err = sysfs_create_group(&adapter->port[0]->dev.kobj,
                                 &cxgb3_attr_group);
        if (err) {
                dev_err(&pdev->dev, "cannot create sysfs group\n");
                goto out_close_led;
        }

        print_port_info(adapter, ai);
        return 0;

out_close_led:
        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);

out_free_dev:
        iounmap(adapter->regs);
        for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
                if (adapter->port[i])
                        free_netdev(adapter->port[i]);

out_free_adapter_nofail:
        kfree_skb(adapter->nofail_skb);

out_free_adapter:
        kfree(adapter);

out_release_regions:
        pci_release_regions(pdev);
out_disable_device:
        pci_disable_device(pdev);
out:
        return err;
}

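/*
 * PCI remove routine: tear down in roughly the reverse order of init_one
 * and release every resource the probe acquired.
 */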
static void remove_one(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        if (adapter) {
                int i;

                t3_sge_stop(adapter);
                sysfs_remove_group(&adapter->port[0]->dev.kobj,
                                   &cxgb3_attr_group);

                if (is_offload(adapter)) {
                        cxgb3_adapter_unofld(adapter);
                        if (test_bit(OFFLOAD_DEVMAP_BIT,
                                     &adapter->open_device_map))
                                offload_close(&adapter->tdev);
                }

                for_each_port(adapter, i)
                        if (test_bit(i, &adapter->registered_device_map))
                                unregister_netdev(adapter->port[i]);

                t3_stop_sge_timers(adapter);
                t3_free_sge_resources(adapter);
                cxgb_disable_msi(adapter);

                for_each_port(adapter, i)
                        if (adapter->port[i])
                                free_netdev(adapter->port[i]);

                iounmap(adapter->regs);
                kfree_skb(adapter->nofail_skb);
                kfree(adapter);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
        }
}

static struct pci_driver driver = {
        .name = DRV_NAME,
        .id_table = cxgb3_pci_tbl,
        .probe = init_one,
        .remove = remove_one,
        .err_handler = &t3_err_handler,
};

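/*
 * Module entry points.  The work queue is created on first probe in
 * init_one and destroyed on module unload.
 */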
static int __init cxgb3_init_module(void)
{
        cxgb3_offload_init();

        return pci_register_driver(&driver);
}

static void __exit cxgb3_cleanup_module(void)
{
        pci_unregister_driver(&driver);
        if (cxgb3_wq)
                destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);