GNU Linux-libre 4.9.288-gnu1
[releases.git] drivers/net/ethernet/broadcom/bnxt/bnxt.c
1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  */
9
10 #include <linux/module.h>
11
12 #include <linux/stringify.h>
13 #include <linux/kernel.h>
14 #include <linux/timer.h>
15 #include <linux/errno.h>
16 #include <linux/ioport.h>
17 #include <linux/slab.h>
18 #include <linux/vmalloc.h>
19 #include <linux/interrupt.h>
20 #include <linux/pci.h>
21 #include <linux/netdevice.h>
22 #include <linux/etherdevice.h>
23 #include <linux/skbuff.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/bitops.h>
26 #include <linux/io.h>
27 #include <linux/irq.h>
28 #include <linux/delay.h>
29 #include <asm/byteorder.h>
30 #include <asm/page.h>
31 #include <linux/time.h>
32 #include <linux/mii.h>
33 #include <linux/if.h>
34 #include <linux/if_vlan.h>
35 #include <linux/rtc.h>
36 #include <net/ip.h>
37 #include <net/tcp.h>
38 #include <net/udp.h>
39 #include <net/checksum.h>
40 #include <net/ip6_checksum.h>
41 #include <net/udp_tunnel.h>
42 #ifdef CONFIG_NET_RX_BUSY_POLL
43 #include <net/busy_poll.h>
44 #endif
45 #include <linux/workqueue.h>
46 #include <linux/prefetch.h>
47 #include <linux/cache.h>
48 #include <linux/log2.h>
49 #include <linux/aer.h>
50 #include <linux/bitmap.h>
51 #include <linux/cpu_rmap.h>
52
53 #include "bnxt_hsi.h"
54 #include "bnxt.h"
55 #include "bnxt_sriov.h"
56 #include "bnxt_ethtool.h"
57
58 #define BNXT_TX_TIMEOUT         (5 * HZ)
59
60 static const char version[] =
61         "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
62
63 MODULE_LICENSE("GPL");
64 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
65 MODULE_VERSION(DRV_MODULE_VERSION);
66
67 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
68 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
69 #define BNXT_RX_COPY_THRESH 256
70
71 #define BNXT_TX_PUSH_THRESH 164
72
73 enum board_idx {
74         BCM57301,
75         BCM57302,
76         BCM57304,
77         BCM57417_NPAR,
78         BCM58700,
79         BCM57311,
80         BCM57312,
81         BCM57402,
82         BCM57404,
83         BCM57406,
84         BCM57402_NPAR,
85         BCM57407,
86         BCM57412,
87         BCM57414,
88         BCM57416,
89         BCM57417,
90         BCM57412_NPAR,
91         BCM57314,
92         BCM57417_SFP,
93         BCM57416_SFP,
94         BCM57404_NPAR,
95         BCM57406_NPAR,
96         BCM57407_SFP,
97         BCM57407_NPAR,
98         BCM57414_NPAR,
99         BCM57416_NPAR,
100         BCM57452,
101         BCM57454,
102         NETXTREME_E_VF,
103         NETXTREME_C_VF,
104 };
105
106 /* indexed by enum above */
107 static const struct {
108         char *name;
109 } board_info[] = {
110         { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
111         { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
112         { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
113         { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
114         { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
115         { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
116         { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
117         { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
118         { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
119         { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
120         { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
121         { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
122         { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
123         { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
124         { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
125         { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
126         { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
127         { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
128         { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
129         { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
130         { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
131         { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
132         { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
133         { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
134         { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
135         { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
136         { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
137         { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
138         { "Broadcom NetXtreme-E Ethernet Virtual Function" },
139         { "Broadcom NetXtreme-C Ethernet Virtual Function" },
140 };
141
142 static const struct pci_device_id bnxt_pci_tbl[] = {
143         { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
144         { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
145         { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
146         { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
147         { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
148         { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
149         { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
150         { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
151         { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
152         { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
153         { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
154         { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
155         { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
156         { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
157         { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
158         { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
159         { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
160         { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
161         { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
162         { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
163         { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
164         { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
165         { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
166         { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
167         { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
168         { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
169         { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
170         { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
171         { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
172         { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
173         { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
174         { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
175 #ifdef CONFIG_BNXT_SRIOV
176         { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
177         { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
178         { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
179         { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
180         { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
181         { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
182 #endif
183         { 0 }
184 };
185
186 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
187
188 static const u16 bnxt_vf_req_snif[] = {
189         HWRM_FUNC_CFG,
190         HWRM_PORT_PHY_QCFG,
191         HWRM_CFA_L2_FILTER_ALLOC,
192 };
193
194 static const u16 bnxt_async_events_arr[] = {
195         HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
196         HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
197         HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
198         HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
199         HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
200 };
201
202 static bool bnxt_vf_pciid(enum board_idx idx)
203 {
204         return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
205 }
206
207 #define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
208 #define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
209 #define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)
210
211 #define BNXT_CP_DB_REARM(db, raw_cons)                                  \
212                 writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
213
214 #define BNXT_CP_DB(db, raw_cons)                                        \
215                 writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
216
217 #define BNXT_CP_DB_IRQ_DIS(db)                                          \
218                 writel(DB_CP_IRQ_DIS_FLAGS, db)
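/* Completion ring doorbells, as the flag names suggest: each write combines a
 * doorbell key with the current consumer index via RING_CMP(raw_cons).
 * BNXT_CP_DB_REARM omits DB_IRQ_DIS and so appears to re-arm the interrupt
 * after polling, BNXT_CP_DB acks the index while leaving the interrupt
 * disabled, and BNXT_CP_DB_IRQ_DIS only masks the interrupt without
 * advancing the index.
 */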
219
220 static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
221 {
222         /* Tell compiler to fetch tx indices from memory. */
223         barrier();
224
225         return bp->tx_ring_size -
226                 ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
227 }
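/* Illustrative only, assuming a power-of-two ring with tx_ring_mask == 0xff
 * and tx_ring_size == 256: tx_prod == 0x002 just after a wrap and
 * tx_cons == 0x0fe give (0x002 - 0x0fe) & 0xff == 4 descriptors in flight,
 * so 252 are still available.  The mask keeps the subtraction correct across
 * the wrap.
 */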
228
229 static const u16 bnxt_lhint_arr[] = {
230         TX_BD_FLAGS_LHINT_512_AND_SMALLER,
231         TX_BD_FLAGS_LHINT_512_TO_1023,
232         TX_BD_FLAGS_LHINT_1024_TO_2047,
233         TX_BD_FLAGS_LHINT_1024_TO_2047,
234         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
235         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
236         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
237         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
238         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
239         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
240         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
241         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
242         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
243         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
244         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
245         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
246         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
247         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
248         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
249 };
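/* This table is indexed by packet length in 512-byte units (length >> 9 in
 * bnxt_start_xmit() below): a 1500-byte frame maps to index 2 and gets
 * TX_BD_FLAGS_LHINT_1024_TO_2047.  The 19 entries cover lengths up to just
 * under 9728 bytes; anything larger is dropped as oversize.
 */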
250
251 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
252 {
253         struct bnxt *bp = netdev_priv(dev);
254         struct tx_bd *txbd;
255         struct tx_bd_ext *txbd1;
256         struct netdev_queue *txq;
257         int i;
258         dma_addr_t mapping;
259         unsigned int length, pad = 0;
260         u32 len, free_size, vlan_tag_flags, cfa_action, flags;
261         u16 prod, last_frag;
262         struct pci_dev *pdev = bp->pdev;
263         struct bnxt_tx_ring_info *txr;
264         struct bnxt_sw_tx_bd *tx_buf;
265
266         i = skb_get_queue_mapping(skb);
267         if (unlikely(i >= bp->tx_nr_rings)) {
268                 dev_kfree_skb_any(skb);
269                 return NETDEV_TX_OK;
270         }
271
272         txr = &bp->tx_ring[i];
273         txq = netdev_get_tx_queue(dev, i);
274         prod = txr->tx_prod;
275
276         free_size = bnxt_tx_avail(bp, txr);
277         if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
278                 netif_tx_stop_queue(txq);
279                 return NETDEV_TX_BUSY;
280         }
281
282         length = skb->len;
283         len = skb_headlen(skb);
284         last_frag = skb_shinfo(skb)->nr_frags;
285
286         txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
287
288         txbd->tx_bd_opaque = prod;
289
290         tx_buf = &txr->tx_buf_ring[prod];
291         tx_buf->skb = skb;
292         tx_buf->nr_frags = last_frag;
293
294         vlan_tag_flags = 0;
295         cfa_action = 0;
296         if (skb_vlan_tag_present(skb)) {
297                 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
298                                  skb_vlan_tag_get(skb);
299                 /* Currently supports 802.1Q and 802.1ad VLAN offloads.
300                  * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
301                  */
302                 if (skb->vlan_proto == htons(ETH_P_8021Q))
303                         vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
304         }
305
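        /* "Push" fast path: when the ring is otherwise empty and the frame is
         * no larger than tx_push_thresh (presumably derived from
         * BNXT_TX_PUSH_THRESH, 164, defined above), the BDs and payload are
         * copied straight into the doorbell BAR instead of being DMA-mapped,
         * apparently trading a small MMIO copy for lower latency on tiny
         * packets.
         */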
306         if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
307                 struct tx_push_buffer *tx_push_buf = txr->tx_push;
308                 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
309                 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
310                 void *pdata = tx_push_buf->data;
311                 u64 *end;
312                 int j, push_len;
313
314                 /* Set COAL_NOW to be ready quickly for the next push */
315                 tx_push->tx_bd_len_flags_type =
316                         cpu_to_le32((length << TX_BD_LEN_SHIFT) |
317                                         TX_BD_TYPE_LONG_TX_BD |
318                                         TX_BD_FLAGS_LHINT_512_AND_SMALLER |
319                                         TX_BD_FLAGS_COAL_NOW |
320                                         TX_BD_FLAGS_PACKET_END |
321                                         (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
322
323                 if (skb->ip_summed == CHECKSUM_PARTIAL)
324                         tx_push1->tx_bd_hsize_lflags =
325                                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
326                 else
327                         tx_push1->tx_bd_hsize_lflags = 0;
328
329                 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
330                 tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
331
332                 end = pdata + length;
333                 end = PTR_ALIGN(end, 8) - 1;
334                 *end = 0;
335
336                 skb_copy_from_linear_data(skb, pdata, len);
337                 pdata += len;
338                 for (j = 0; j < last_frag; j++) {
339                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
340                         void *fptr;
341
342                         fptr = skb_frag_address_safe(frag);
343                         if (!fptr)
344                                 goto normal_tx;
345
346                         memcpy(pdata, fptr, skb_frag_size(frag));
347                         pdata += skb_frag_size(frag);
348                 }
349
350                 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
351                 txbd->tx_bd_haddr = txr->data_mapping;
352                 prod = NEXT_TX(prod);
353                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
354                 memcpy(txbd, tx_push1, sizeof(*txbd));
355                 prod = NEXT_TX(prod);
356                 tx_push->doorbell =
357                         cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
358                 txr->tx_prod = prod;
359
360                 tx_buf->is_push = 1;
361                 netdev_tx_sent_queue(txq, skb->len);
362                 wmb();  /* Sync is_push and byte queue before pushing data */
363
364                 push_len = (length + sizeof(*tx_push) + 7) / 8;
365                 if (push_len > 16) {
366                         __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
367                         __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
368                                          (push_len - 16) << 1);
369                 } else {
370                         __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
371                                          push_len);
372                 }
373
374                 goto tx_done;
375         }
376
377 normal_tx:
378         if (length < BNXT_MIN_PKT_SIZE) {
379                 pad = BNXT_MIN_PKT_SIZE - length;
380                 if (skb_pad(skb, pad)) {
381                         /* SKB already freed. */
382                         tx_buf->skb = NULL;
383                         return NETDEV_TX_OK;
384                 }
385                 length = BNXT_MIN_PKT_SIZE;
386         }
387
388         mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
389
390         if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
391                 dev_kfree_skb_any(skb);
392                 tx_buf->skb = NULL;
393                 return NETDEV_TX_OK;
394         }
395
396         dma_unmap_addr_set(tx_buf, mapping, mapping);
397         flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
398                 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
399
400         txbd->tx_bd_haddr = cpu_to_le64(mapping);
401
402         prod = NEXT_TX(prod);
403         txbd1 = (struct tx_bd_ext *)
404                 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
405
406         txbd1->tx_bd_hsize_lflags = 0;
407         if (skb_is_gso(skb)) {
408                 u32 hdr_len;
409
410                 if (skb->encapsulation)
411                         hdr_len = skb_inner_network_offset(skb) +
412                                 skb_inner_network_header_len(skb) +
413                                 inner_tcp_hdrlen(skb);
414                 else
415                         hdr_len = skb_transport_offset(skb) +
416                                 tcp_hdrlen(skb);
417
418                 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
419                                         TX_BD_FLAGS_T_IPID |
420                                         (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
421                 length = skb_shinfo(skb)->gso_size;
422                 txbd1->tx_bd_mss = cpu_to_le32(length);
423                 length += hdr_len;
424         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
425                 txbd1->tx_bd_hsize_lflags =
426                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
427                 txbd1->tx_bd_mss = 0;
428         }
429
430         length >>= 9;
431         if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
432                 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
433                                      skb->len);
434                 i = 0;
435                 goto tx_dma_error;
436         }
437         flags |= bnxt_lhint_arr[length];
438         txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
439
440         txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
441         txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
442         for (i = 0; i < last_frag; i++) {
443                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
444
445                 prod = NEXT_TX(prod);
446                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
447
448                 len = skb_frag_size(frag);
449                 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
450                                            DMA_TO_DEVICE);
451
452                 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
453                         goto tx_dma_error;
454
455                 tx_buf = &txr->tx_buf_ring[prod];
456                 dma_unmap_addr_set(tx_buf, mapping, mapping);
457
458                 txbd->tx_bd_haddr = cpu_to_le64(mapping);
459
460                 flags = len << TX_BD_LEN_SHIFT;
461                 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
462         }
463
464         flags &= ~TX_BD_LEN;
465         txbd->tx_bd_len_flags_type =
466                 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
467                             TX_BD_FLAGS_PACKET_END);
468
469         netdev_tx_sent_queue(txq, skb->len);
470
471         /* Sync BD data before updating doorbell */
472         wmb();
473
474         prod = NEXT_TX(prod);
475         txr->tx_prod = prod;
476
477         writel(DB_KEY_TX | prod, txr->tx_doorbell);
478         writel(DB_KEY_TX | prod, txr->tx_doorbell);
479
480 tx_done:
481
482         mmiowb();
483
484         if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
485                 netif_tx_stop_queue(txq);
486
487                 /* netif_tx_stop_queue() must be done before checking
488                  * tx index in bnxt_tx_avail() below, because in
489                  * bnxt_tx_int(), we update tx index before checking for
490                  * netif_tx_queue_stopped().
491                  */
492                 smp_mb();
493                 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
494                         netif_tx_wake_queue(txq);
495         }
496         return NETDEV_TX_OK;
497
498 tx_dma_error:
499         last_frag = i;
500
501         /* start back at the beginning and unmap the skb */
502         prod = txr->tx_prod;
503         tx_buf = &txr->tx_buf_ring[prod];
504         tx_buf->skb = NULL;
505         dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
506                          skb_headlen(skb), PCI_DMA_TODEVICE);
507         prod = NEXT_TX(prod);
508
509         /* unmap remaining mapped pages */
510         for (i = 0; i < last_frag; i++) {
511                 prod = NEXT_TX(prod);
512                 tx_buf = &txr->tx_buf_ring[prod];
513                 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
514                                skb_frag_size(&skb_shinfo(skb)->frags[i]),
515                                PCI_DMA_TODEVICE);
516         }
517
518         dev_kfree_skb_any(skb);
519         return NETDEV_TX_OK;
520 }
521
522 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
523 {
524         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
525         int index = txr - &bp->tx_ring[0];
526         struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
527         u16 cons = txr->tx_cons;
528         struct pci_dev *pdev = bp->pdev;
529         int i;
530         unsigned int tx_bytes = 0;
531
532         for (i = 0; i < nr_pkts; i++) {
533                 struct bnxt_sw_tx_bd *tx_buf;
534                 struct sk_buff *skb;
535                 int j, last;
536
537                 tx_buf = &txr->tx_buf_ring[cons];
538                 cons = NEXT_TX(cons);
539                 skb = tx_buf->skb;
540                 tx_buf->skb = NULL;
541
542                 if (tx_buf->is_push) {
543                         tx_buf->is_push = 0;
544                         goto next_tx_int;
545                 }
546
547                 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
548                                  skb_headlen(skb), PCI_DMA_TODEVICE);
549                 last = tx_buf->nr_frags;
550
551                 for (j = 0; j < last; j++) {
552                         cons = NEXT_TX(cons);
553                         tx_buf = &txr->tx_buf_ring[cons];
554                         dma_unmap_page(
555                                 &pdev->dev,
556                                 dma_unmap_addr(tx_buf, mapping),
557                                 skb_frag_size(&skb_shinfo(skb)->frags[j]),
558                                 PCI_DMA_TODEVICE);
559                 }
560
561 next_tx_int:
562                 cons = NEXT_TX(cons);
563
564                 tx_bytes += skb->len;
565                 dev_kfree_skb_any(skb);
566         }
567
568         netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
569         txr->tx_cons = cons;
570
571         /* Need to make the tx_cons update visible to bnxt_start_xmit()
572          * before checking for netif_tx_queue_stopped().  Without the
573          * memory barrier, there is a small possibility that bnxt_start_xmit()
574          * will miss it and cause the queue to be stopped forever.
575          */
576         smp_mb();
577
578         if (unlikely(netif_tx_queue_stopped(txq)) &&
579             (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
580                 __netif_tx_lock(txq, smp_processor_id());
581                 if (netif_tx_queue_stopped(txq) &&
582                     bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
583                     txr->dev_state != BNXT_DEV_STATE_CLOSING)
584                         netif_tx_wake_queue(txq);
585                 __netif_tx_unlock(txq);
586         }
587 }
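/* netdev_tx_completed_queue() above pairs with netdev_tx_sent_queue() in
 * bnxt_start_xmit(); together they feed byte queue limits (BQL) so the stack
 * can bound how much data is queued in each TX ring.
 */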
588
589 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
590                                        gfp_t gfp)
591 {
592         u8 *data;
593         struct pci_dev *pdev = bp->pdev;
594
595         data = kmalloc(bp->rx_buf_size, gfp);
596         if (!data)
597                 return NULL;
598
599         *mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
600                                   bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
601
602         if (dma_mapping_error(&pdev->dev, *mapping)) {
603                 kfree(data);
604                 data = NULL;
605         }
606         return data;
607 }
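/* RX buffer layout, as implied by the offsets above: the buffer is plain
 * kmalloc() memory later handed to build_skb().  Only data + BNXT_RX_DMA_OFFSET
 * (NET_SKB_PAD) onward is mapped for the NIC, and the receive path reserves
 * BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN), presumably so the frame starts
 * NET_IP_ALIGN bytes into the DMA area and the IP header stays 4-byte aligned.
 */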
608
609 static inline int bnxt_alloc_rx_data(struct bnxt *bp,
610                                      struct bnxt_rx_ring_info *rxr,
611                                      u16 prod, gfp_t gfp)
612 {
613         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
614         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
615         u8 *data;
616         dma_addr_t mapping;
617
618         data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
619         if (!data)
620                 return -ENOMEM;
621
622         rx_buf->data = data;
623         dma_unmap_addr_set(rx_buf, mapping, mapping);
624
625         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
626
627         return 0;
628 }
629
630 static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
631                                u8 *data)
632 {
633         u16 prod = rxr->rx_prod;
634         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
635         struct rx_bd *cons_bd, *prod_bd;
636
637         prod_rx_buf = &rxr->rx_buf_ring[prod];
638         cons_rx_buf = &rxr->rx_buf_ring[cons];
639
640         prod_rx_buf->data = data;
641
642         dma_unmap_addr_set(prod_rx_buf, mapping,
643                            dma_unmap_addr(cons_rx_buf, mapping));
644
645         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
646         cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
647
648         prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
649 }
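/* Recycle path: rather than allocating a replacement, the data pointer and DMA
 * mapping of the just-consumed buffer are moved to the current producer slot,
 * so error and copy-break paths can repost the same buffer at no cost.
 */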
650
651 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
652 {
653         u16 next, max = rxr->rx_agg_bmap_size;
654
655         next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
656         if (next >= max)
657                 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
658         return next;
659 }
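/* Free software slots on the aggregation ring are tracked in rx_agg_bmap; the
 * search wraps back to the start of the bitmap once it runs past
 * rx_agg_bmap_size.
 */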
660
661 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
662                                      struct bnxt_rx_ring_info *rxr,
663                                      u16 prod, gfp_t gfp)
664 {
665         struct rx_bd *rxbd =
666                 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
667         struct bnxt_sw_rx_agg_bd *rx_agg_buf;
668         struct pci_dev *pdev = bp->pdev;
669         struct page *page;
670         dma_addr_t mapping;
671         u16 sw_prod = rxr->rx_sw_agg_prod;
672         unsigned int offset = 0;
673
674         if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
675                 page = rxr->rx_page;
676                 if (!page) {
677                         page = alloc_page(gfp);
678                         if (!page)
679                                 return -ENOMEM;
680                         rxr->rx_page = page;
681                         rxr->rx_page_offset = 0;
682                 }
683                 offset = rxr->rx_page_offset;
684                 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
685                 if (rxr->rx_page_offset == PAGE_SIZE)
686                         rxr->rx_page = NULL;
687                 else
688                         get_page(page);
689         } else {
690                 page = alloc_page(gfp);
691                 if (!page)
692                         return -ENOMEM;
693         }
694
695         mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
696                                PCI_DMA_FROMDEVICE);
697         if (dma_mapping_error(&pdev->dev, mapping)) {
698                 __free_page(page);
699                 return -EIO;
700         }
701
702         if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
703                 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
704
705         __set_bit(sw_prod, rxr->rx_agg_bmap);
706         rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
707         rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
708
709         rx_agg_buf->page = page;
710         rx_agg_buf->offset = offset;
711         rx_agg_buf->mapping = mapping;
712         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
713         rxbd->rx_bd_opaque = sw_prod;
714         return 0;
715 }
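/* Where PAGE_SIZE exceeds BNXT_RX_PAGE_SIZE (64 KB pages, for instance), one
 * page is handed out in BNXT_RX_PAGE_SIZE chunks: rx_page_offset walks through
 * the page and get_page() takes an extra reference for every chunk except the
 * last, which inherits the original allocation reference once rx_page is
 * dropped to NULL.
 */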
716
717 static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
718                                    u32 agg_bufs)
719 {
720         struct bnxt *bp = bnapi->bp;
721         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
722         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
723         u16 prod = rxr->rx_agg_prod;
724         u16 sw_prod = rxr->rx_sw_agg_prod;
725         u32 i;
726
727         for (i = 0; i < agg_bufs; i++) {
728                 u16 cons;
729                 struct rx_agg_cmp *agg;
730                 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
731                 struct rx_bd *prod_bd;
732                 struct page *page;
733
734                 agg = (struct rx_agg_cmp *)
735                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
736                 cons = agg->rx_agg_cmp_opaque;
737                 __clear_bit(cons, rxr->rx_agg_bmap);
738
739                 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
740                         sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
741
742                 __set_bit(sw_prod, rxr->rx_agg_bmap);
743                 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
744                 cons_rx_buf = &rxr->rx_agg_ring[cons];
745
746                 /* It is possible for sw_prod to be equal to cons, so
747                  * set cons_rx_buf->page to NULL first.
748                  */
749                 page = cons_rx_buf->page;
750                 cons_rx_buf->page = NULL;
751                 prod_rx_buf->page = page;
752                 prod_rx_buf->offset = cons_rx_buf->offset;
753
754                 prod_rx_buf->mapping = cons_rx_buf->mapping;
755
756                 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
757
758                 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
759                 prod_bd->rx_bd_opaque = sw_prod;
760
761                 prod = NEXT_RX_AGG(prod);
762                 sw_prod = NEXT_RX_AGG(sw_prod);
763                 cp_cons = NEXT_CMP(cp_cons);
764         }
765         rxr->rx_agg_prod = prod;
766         rxr->rx_sw_agg_prod = sw_prod;
767 }
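/* Abort/recycle path for aggregation buffers: each page named by the
 * completion descriptors is moved back onto the aggregation ring (bitmap slot,
 * page pointer, mapping and BD address), so nothing is freed or remapped when
 * a TPA or jumbo completion has to be dropped.
 */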
768
769 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
770                                    struct bnxt_rx_ring_info *rxr, u16 cons,
771                                    u16 prod, u8 *data, dma_addr_t dma_addr,
772                                    unsigned int len)
773 {
774         int err;
775         struct sk_buff *skb;
776
777         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
778         if (unlikely(err)) {
779                 bnxt_reuse_rx_data(rxr, cons, data);
780                 return NULL;
781         }
782
783         skb = build_skb(data, 0);
784         dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
785                          PCI_DMA_FROMDEVICE);
786         if (!skb) {
787                 kfree(data);
788                 return NULL;
789         }
790
791         skb_reserve(skb, BNXT_RX_OFFSET);
792         skb_put(skb, len);
793         return skb;
794 }
795
796 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
797                                      struct sk_buff *skb, u16 cp_cons,
798                                      u32 agg_bufs)
799 {
800         struct pci_dev *pdev = bp->pdev;
801         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
802         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
803         u16 prod = rxr->rx_agg_prod;
804         u32 i;
805
806         for (i = 0; i < agg_bufs; i++) {
807                 u16 cons, frag_len;
808                 struct rx_agg_cmp *agg;
809                 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
810                 struct page *page;
811                 dma_addr_t mapping;
812
813                 agg = (struct rx_agg_cmp *)
814                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
815                 cons = agg->rx_agg_cmp_opaque;
816                 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
817                             RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
818
819                 cons_rx_buf = &rxr->rx_agg_ring[cons];
820                 skb_fill_page_desc(skb, i, cons_rx_buf->page,
821                                    cons_rx_buf->offset, frag_len);
822                 __clear_bit(cons, rxr->rx_agg_bmap);
823
824                 /* It is possible for bnxt_alloc_rx_page() to allocate
825                  * a sw_prod index that equals the cons index, so we
826                  * need to clear the cons entry now.
827                  */
828                 mapping = dma_unmap_addr(cons_rx_buf, mapping);
829                 page = cons_rx_buf->page;
830                 cons_rx_buf->page = NULL;
831
832                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
833                         struct skb_shared_info *shinfo;
834                         unsigned int nr_frags;
835
836                         shinfo = skb_shinfo(skb);
837                         nr_frags = --shinfo->nr_frags;
838                         __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
839
840                         dev_kfree_skb(skb);
841
842                         cons_rx_buf->page = page;
843
844                         /* Update prod since possibly some pages have been
845                          * allocated already.
846                          */
847                         rxr->rx_agg_prod = prod;
848                         bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
849                         return NULL;
850                 }
851
852                 dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
853                                PCI_DMA_FROMDEVICE);
854
855                 skb->data_len += frag_len;
856                 skb->len += frag_len;
857                 skb->truesize += PAGE_SIZE;
858
859                 prod = NEXT_RX_AGG(prod);
860                 cp_cons = NEXT_CMP(cp_cons);
861         }
862         rxr->rx_agg_prod = prod;
863         return skb;
864 }
865
866 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
867                                u8 agg_bufs, u32 *raw_cons)
868 {
869         u16 last;
870         struct rx_agg_cmp *agg;
871
872         *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
873         last = RING_CMP(*raw_cons);
874         agg = (struct rx_agg_cmp *)
875                 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
876         return RX_AGG_CMP_VALID(agg, *raw_cons);
877 }
878
879 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
880                                             unsigned int len,
881                                             dma_addr_t mapping)
882 {
883         struct bnxt *bp = bnapi->bp;
884         struct pci_dev *pdev = bp->pdev;
885         struct sk_buff *skb;
886
887         skb = napi_alloc_skb(&bnapi->napi, len);
888         if (!skb)
889                 return NULL;
890
891         dma_sync_single_for_cpu(&pdev->dev, mapping,
892                                 bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);
893
894         memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);
895
896         dma_sync_single_for_device(&pdev->dev, mapping,
897                                    bp->rx_copy_thresh,
898                                    PCI_DMA_FROMDEVICE);
899
900         skb_put(skb, len);
901         return skb;
902 }
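/* Copy-break path: packets no longer than bp->rx_copy_thresh (presumably
 * seeded from BNXT_RX_COPY_THRESH, 256, defined above) are copied into a fresh
 * napi_alloc_skb() buffer between a sync-for-cpu/sync-for-device pair, so the
 * original DMA buffer stays mapped and the caller can repost it as-is.
 */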
903
904 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
905                            u32 *raw_cons, void *cmp)
906 {
907         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
908         struct rx_cmp *rxcmp = cmp;
909         u32 tmp_raw_cons = *raw_cons;
910         u8 cmp_type, agg_bufs = 0;
911
912         cmp_type = RX_CMP_TYPE(rxcmp);
913
914         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
915                 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
916                             RX_CMP_AGG_BUFS) >>
917                            RX_CMP_AGG_BUFS_SHIFT;
918         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
919                 struct rx_tpa_end_cmp *tpa_end = cmp;
920
921                 agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
922                             RX_TPA_END_CMP_AGG_BUFS) >>
923                            RX_TPA_END_CMP_AGG_BUFS_SHIFT;
924         }
925
926         if (agg_bufs) {
927                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
928                         return -EBUSY;
929         }
930         *raw_cons = tmp_raw_cons;
931         return 0;
932 }
933
934 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
935 {
936         if (!rxr->bnapi->in_reset) {
937                 rxr->bnapi->in_reset = true;
938                 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
939                 schedule_work(&bp->sp_task);
940         }
941         rxr->rx_next_cons = 0xffff;
942 }
943
944 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
945                            struct rx_tpa_start_cmp *tpa_start,
946                            struct rx_tpa_start_cmp_ext *tpa_start1)
947 {
948         u8 agg_id = TPA_START_AGG_ID(tpa_start);
949         u16 cons, prod;
950         struct bnxt_tpa_info *tpa_info;
951         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
952         struct rx_bd *prod_bd;
953         dma_addr_t mapping;
954
955         cons = tpa_start->rx_tpa_start_cmp_opaque;
956         prod = rxr->rx_prod;
957         cons_rx_buf = &rxr->rx_buf_ring[cons];
958         prod_rx_buf = &rxr->rx_buf_ring[prod];
959         tpa_info = &rxr->rx_tpa[agg_id];
960
961         if (unlikely(cons != rxr->rx_next_cons)) {
962                 netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
963                             cons, rxr->rx_next_cons);
964                 bnxt_sched_reset(bp, rxr);
965                 return;
966         }
967
968         prod_rx_buf->data = tpa_info->data;
969
970         mapping = tpa_info->mapping;
971         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
972
973         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
974
975         prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
976
977         tpa_info->data = cons_rx_buf->data;
978         cons_rx_buf->data = NULL;
979         tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);
980
981         tpa_info->len =
982                 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
983                                 RX_TPA_START_CMP_LEN_SHIFT;
984         if (likely(TPA_START_HASH_VALID(tpa_start))) {
985                 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
986
987                 tpa_info->hash_type = PKT_HASH_TYPE_L4;
988                 tpa_info->gso_type = SKB_GSO_TCPV4;
989                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
990                 if (hash_type == 3)
991                         tpa_info->gso_type = SKB_GSO_TCPV6;
992                 tpa_info->rss_hash =
993                         le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
994         } else {
995                 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
996                 tpa_info->gso_type = 0;
997                 if (netif_msg_rx_err(bp))
998                         netdev_warn(bp->dev, "TPA packet without valid hash\n");
999         }
1000         tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1001         tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1002         tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1003
1004         rxr->rx_prod = NEXT_RX(prod);
1005         cons = NEXT_RX(cons);
1006         rxr->rx_next_cons = NEXT_RX(cons);
1007         cons_rx_buf = &rxr->rx_buf_ring[cons];
1008
1009         bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1010         rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1011         cons_rx_buf->data = NULL;
1012 }
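/* TPA start swaps buffers rather than copying: the spare buffer held in
 * tpa_info is posted at the producer slot, while the buffer the hardware keeps
 * aggregating into is parked in tpa_info along with its length, hash and
 * metadata until the TPA end completion.  The following RX entry is recycled
 * as well, since a TPA start apparently consumes two RX descriptors.
 */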
1013
1014 static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
1015                            u16 cp_cons, u32 agg_bufs)
1016 {
1017         if (agg_bufs)
1018                 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1019 }
1020
1021 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1022                                            int payload_off, int tcp_ts,
1023                                            struct sk_buff *skb)
1024 {
1025 #ifdef CONFIG_INET
1026         struct tcphdr *th;
1027         int len, nw_off;
1028         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1029         u32 hdr_info = tpa_info->hdr_info;
1030         bool loopback = false;
1031
1032         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1033         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1034         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1035
1036         /* If the packet is an internal loopback packet, the offsets will
1037          * have an extra 4 bytes.
1038          */
1039         if (inner_mac_off == 4) {
1040                 loopback = true;
1041         } else if (inner_mac_off > 4) {
1042                 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1043                                             ETH_HLEN - 2));
1044
1045                 /* We only support inner IPv4/IPv6.  If we don't see the
1046                  * correct protocol ID, it must be a loopback packet where
1047                  * the offsets are off by 4.
1048                  */
1049                 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1050                         loopback = true;
1051         }
1052         if (loopback) {
1053                 /* internal loopback packet, subtract all offsets by 4 */
1054                 inner_ip_off -= 4;
1055                 inner_mac_off -= 4;
1056                 outer_ip_off -= 4;
1057         }
1058
1059         nw_off = inner_ip_off - ETH_HLEN;
1060         skb_set_network_header(skb, nw_off);
1061         if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1062                 struct ipv6hdr *iph = ipv6_hdr(skb);
1063
1064                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1065                 len = skb->len - skb_transport_offset(skb);
1066                 th = tcp_hdr(skb);
1067                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1068         } else {
1069                 struct iphdr *iph = ip_hdr(skb);
1070
1071                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1072                 len = skb->len - skb_transport_offset(skb);
1073                 th = tcp_hdr(skb);
1074                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1075         }
1076
1077         if (inner_mac_off) { /* tunnel */
1078                 struct udphdr *uh = NULL;
1079                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1080                                             ETH_HLEN - 2));
1081
1082                 if (proto == htons(ETH_P_IP)) {
1083                         struct iphdr *iph = (struct iphdr *)skb->data;
1084
1085                         if (iph->protocol == IPPROTO_UDP)
1086                                 uh = (struct udphdr *)(iph + 1);
1087                 } else {
1088                         struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1089
1090                         if (iph->nexthdr == IPPROTO_UDP)
1091                                 uh = (struct udphdr *)(iph + 1);
1092                 }
1093                 if (uh) {
1094                         if (uh->check)
1095                                 skb_shinfo(skb)->gso_type |=
1096                                         SKB_GSO_UDP_TUNNEL_CSUM;
1097                         else
1098                                 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1099                 }
1100         }
1101 #endif
1102         return skb;
1103 }
1104
1105 #define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
1106 #define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1107
1108 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1109                                            int payload_off, int tcp_ts,
1110                                            struct sk_buff *skb)
1111 {
1112 #ifdef CONFIG_INET
1113         struct tcphdr *th;
1114         int len, nw_off, tcp_opt_len = 0;
1115
1116         if (tcp_ts)
1117                 tcp_opt_len = 12;
1118
1119         if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1120                 struct iphdr *iph;
1121
1122                 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1123                          ETH_HLEN;
1124                 skb_set_network_header(skb, nw_off);
1125                 iph = ip_hdr(skb);
1126                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1127                 len = skb->len - skb_transport_offset(skb);
1128                 th = tcp_hdr(skb);
1129                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1130         } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1131                 struct ipv6hdr *iph;
1132
1133                 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1134                          ETH_HLEN;
1135                 skb_set_network_header(skb, nw_off);
1136                 iph = ipv6_hdr(skb);
1137                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1138                 len = skb->len - skb_transport_offset(skb);
1139                 th = tcp_hdr(skb);
1140                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1141         } else {
1142                 dev_kfree_skb_any(skb);
1143                 return NULL;
1144         }
1145         tcp_gro_complete(skb);
1146
1147         if (nw_off) { /* tunnel */
1148                 struct udphdr *uh = NULL;
1149
1150                 if (skb->protocol == htons(ETH_P_IP)) {
1151                         struct iphdr *iph = (struct iphdr *)skb->data;
1152
1153                         if (iph->protocol == IPPROTO_UDP)
1154                                 uh = (struct udphdr *)(iph + 1);
1155                 } else {
1156                         struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1157
1158                         if (iph->nexthdr == IPPROTO_UDP)
1159                                 uh = (struct udphdr *)(iph + 1);
1160                 }
1161                 if (uh) {
1162                         if (uh->check)
1163                                 skb_shinfo(skb)->gso_type |=
1164                                         SKB_GSO_UDP_TUNNEL_CSUM;
1165                         else
1166                                 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1167                 }
1168         }
1169 #endif
1170         return skb;
1171 }
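/* Worked example for the 5730x offsets above, assuming payload_off is the
 * hardware-reported offset of the start of the TCP payload: an untunneled
 * IPv4/TCP packet without timestamps has payload_off = 14 + 20 + 20 = 54, so
 * nw_off = 54 - BNXT_IPV4_HDR_SIZE - 0 - ETH_HLEN = 0.  A nonzero nw_off
 * therefore implies encapsulation, which is why the tunnel check keys off it.
 */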
1172
1173 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1174                                            struct bnxt_tpa_info *tpa_info,
1175                                            struct rx_tpa_end_cmp *tpa_end,
1176                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1177                                            struct sk_buff *skb)
1178 {
1179 #ifdef CONFIG_INET
1180         int payload_off;
1181         u16 segs;
1182
1183         segs = TPA_END_TPA_SEGS(tpa_end);
1184         if (segs == 1)
1185                 return skb;
1186
1187         NAPI_GRO_CB(skb)->count = segs;
1188         skb_shinfo(skb)->gso_size =
1189                 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1190         skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1191         payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1192                        RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
1193                       RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
1194         skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1195 #endif
1196         return skb;
1197 }
1198
1199 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1200                                            struct bnxt_napi *bnapi,
1201                                            u32 *raw_cons,
1202                                            struct rx_tpa_end_cmp *tpa_end,
1203                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1204                                            bool *agg_event)
1205 {
1206         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1207         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1208         u8 agg_id = TPA_END_AGG_ID(tpa_end);
1209         u8 *data, agg_bufs;
1210         u16 cp_cons = RING_CMP(*raw_cons);
1211         unsigned int len;
1212         struct bnxt_tpa_info *tpa_info;
1213         dma_addr_t mapping;
1214         struct sk_buff *skb;
1215
1216         if (unlikely(bnapi->in_reset)) {
1217                 int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
1218
1219                 if (rc < 0)
1220                         return ERR_PTR(-EBUSY);
1221                 return NULL;
1222         }
1223
1224         tpa_info = &rxr->rx_tpa[agg_id];
1225         data = tpa_info->data;
1226         prefetch(data);
1227         len = tpa_info->len;
1228         mapping = tpa_info->mapping;
1229
1230         agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1231                     RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
1232
1233         if (agg_bufs) {
1234                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1235                         return ERR_PTR(-EBUSY);
1236
1237                 *agg_event = true;
1238                 cp_cons = NEXT_CMP(cp_cons);
1239         }
1240
1241         if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
1242                 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1243                 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1244                             agg_bufs, (int)MAX_SKB_FRAGS);
1245                 return NULL;
1246         }
1247
1248         if (len <= bp->rx_copy_thresh) {
1249                 skb = bnxt_copy_skb(bnapi, data, len, mapping);
1250                 if (!skb) {
1251                         bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1252                         return NULL;
1253                 }
1254         } else {
1255                 u8 *new_data;
1256                 dma_addr_t new_mapping;
1257
1258                 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1259                 if (!new_data) {
1260                         bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1261                         return NULL;
1262                 }
1263
1264                 tpa_info->data = new_data;
1265                 tpa_info->mapping = new_mapping;
1266
1267                 skb = build_skb(data, 0);
1268                 dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
1269                                  PCI_DMA_FROMDEVICE);
1270
1271                 if (!skb) {
1272                         kfree(data);
1273                         bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1274                         return NULL;
1275                 }
1276                 skb_reserve(skb, BNXT_RX_OFFSET);
1277                 skb_put(skb, len);
1278         }
1279
1280         if (agg_bufs) {
1281                 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1282                 if (!skb) {
1283                         /* Page reuse already handled by bnxt_rx_pages(). */
1284                         return NULL;
1285                 }
1286         }
1287         skb->protocol = eth_type_trans(skb, bp->dev);
1288
1289         if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1290                 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1291
1292         if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1293             (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1294                 u16 vlan_proto = tpa_info->metadata >>
1295                         RX_CMP_FLAGS2_METADATA_TPID_SFT;
1296                 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
1297
1298                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1299         }
1300
1301         skb_checksum_none_assert(skb);
1302         if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1303                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1304                 skb->csum_level =
1305                         (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1306         }
1307
1308         if (TPA_END_GRO(tpa_end))
1309                 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1310
1311         return skb;
1312 }
1313
1314 /* returns the following:
1315  * 1       - 1 packet successfully received
1316  * 0       - successful TPA_START, packet not completed yet
1317  * -EBUSY  - completion ring does not have all the agg buffers yet
1318  * -ENOMEM - packet aborted due to out of memory
1319  * -EIO    - packet aborted due to hw error indicated in BD
1320  */
1321 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
1322                        bool *agg_event)
1323 {
1324         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1325         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1326         struct net_device *dev = bp->dev;
1327         struct rx_cmp *rxcmp;
1328         struct rx_cmp_ext *rxcmp1;
1329         u32 tmp_raw_cons = *raw_cons;
1330         u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1331         struct bnxt_sw_rx_bd *rx_buf;
1332         unsigned int len;
1333         u8 *data, agg_bufs, cmp_type;
1334         dma_addr_t dma_addr;
1335         struct sk_buff *skb;
1336         int rc = 0;
1337
1338         rxcmp = (struct rx_cmp *)
1339                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1340
1341         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1342         cp_cons = RING_CMP(tmp_raw_cons);
1343         rxcmp1 = (struct rx_cmp_ext *)
1344                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1345
1346         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1347                 return -EBUSY;
1348
1349         cmp_type = RX_CMP_TYPE(rxcmp);
1350
1351         prod = rxr->rx_prod;
1352
1353         if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1354                 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1355                                (struct rx_tpa_start_cmp_ext *)rxcmp1);
1356
1357                 goto next_rx_no_prod;
1358
1359         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1360                 skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
1361                                    (struct rx_tpa_end_cmp *)rxcmp,
1362                                    (struct rx_tpa_end_cmp_ext *)rxcmp1,
1363                                    agg_event);
1364
1365                 if (unlikely(IS_ERR(skb)))
1366                         return -EBUSY;
1367
1368                 rc = -ENOMEM;
1369                 if (likely(skb)) {
1370                         skb_record_rx_queue(skb, bnapi->index);
1371                         skb_mark_napi_id(skb, &bnapi->napi);
1372                         if (bnxt_busy_polling(bnapi))
1373                                 netif_receive_skb(skb);
1374                         else
1375                                 napi_gro_receive(&bnapi->napi, skb);
1376                         rc = 1;
1377                 }
1378                 goto next_rx_no_prod;
1379         }
1380
1381         cons = rxcmp->rx_cmp_opaque;
1382         if (unlikely(cons != rxr->rx_next_cons)) {
1383                 int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
1384
1385                 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1386                             cons, rxr->rx_next_cons);
1387                 bnxt_sched_reset(bp, rxr);
1388                 return rc1;
1389         }
1390         rx_buf = &rxr->rx_buf_ring[cons];
1391         data = rx_buf->data;
1392         prefetch(data);
1393
1394         agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
1395                                 RX_CMP_AGG_BUFS_SHIFT;
1396
1397         if (agg_bufs) {
1398                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1399                         return -EBUSY;
1400
1401                 cp_cons = NEXT_CMP(cp_cons);
1402                 *agg_event = true;
1403         }
1404
1405         rx_buf->data = NULL;
1406         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1407                 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1408
1409                 bnxt_reuse_rx_data(rxr, cons, data);
1410                 if (agg_bufs)
1411                         bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1412
1413                 rc = -EIO;
1414                 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1415                         netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
1416                         bnxt_sched_reset(bp, rxr);
1417                 }
1418                 goto next_rx;
1419         }
1420
1421         len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1422         dma_addr = dma_unmap_addr(rx_buf, mapping);
1423
1424         if (len <= bp->rx_copy_thresh) {
1425                 skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
1426                 bnxt_reuse_rx_data(rxr, cons, data);
1427                 if (!skb) {
1428                         if (agg_bufs)
1429                                 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1430                         rc = -ENOMEM;
1431                         goto next_rx;
1432                 }
1433         } else {
1434                 skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
1435                 if (!skb) {
1436                         rc = -ENOMEM;
1437                         goto next_rx;
1438                 }
1439         }
1440
1441         if (agg_bufs) {
1442                 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1443                 if (!skb) {
1444                         rc = -ENOMEM;
1445                         goto next_rx;
1446                 }
1447         }
1448
1449         if (RX_CMP_HASH_VALID(rxcmp)) {
1450                 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1451                 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1452
1453                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1454                 if (hash_type != 1 && hash_type != 3)
1455                         type = PKT_HASH_TYPE_L3;
1456                 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1457         }
1458
1459         skb->protocol = eth_type_trans(skb, dev);
1460
1461         if ((rxcmp1->rx_cmp_flags2 &
1462              cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1463             (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1464                 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1465                 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
1466                 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1467
1468                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1469         }
1470
1471         skb_checksum_none_assert(skb);
1472         if (RX_CMP_L4_CS_OK(rxcmp1)) {
1473                 if (dev->features & NETIF_F_RXCSUM) {
1474                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1475                         skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1476                 }
1477         } else {
1478                 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1479                         if (dev->features & NETIF_F_RXCSUM)
1480                                 cpr->rx_l4_csum_errors++;
1481                 }
1482         }
1483
1484         skb_record_rx_queue(skb, bnapi->index);
1485         skb_mark_napi_id(skb, &bnapi->napi);
1486         if (bnxt_busy_polling(bnapi))
1487                 netif_receive_skb(skb);
1488         else
1489                 napi_gro_receive(&bnapi->napi, skb);
1490         rc = 1;
1491
1492 next_rx:
1493         rxr->rx_prod = NEXT_RX(prod);
1494         rxr->rx_next_cons = NEXT_RX(cons);
1495
1496 next_rx_no_prod:
1497         *raw_cons = tmp_raw_cons;
1498
1499         return rc;
1500 }
1501
1502 #define BNXT_GET_EVENT_PORT(data)       \
1503         ((data) &                               \
1504          HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1505
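/* Handle an asynchronous event completion from the firmware.  Each
 * recognized event sets the matching bit in bp->sp_event and schedules
 * bp->sp_task; events that do not apply (e.g. PF-only events seen while
 * running as a VF) are ignored.
 */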
1506 static int bnxt_async_event_process(struct bnxt *bp,
1507                                     struct hwrm_async_event_cmpl *cmpl)
1508 {
1509         u16 event_id = le16_to_cpu(cmpl->event_id);
1510
1511         /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
1512         switch (event_id) {
1513         case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
1514                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1515                 struct bnxt_link_info *link_info = &bp->link_info;
1516
1517                 if (BNXT_VF(bp))
1518                         goto async_event_process_exit;
1519
1520                 /* print unsupported speed warning in forced speed mode only */
1521                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1522                     (data1 & 0x20000)) {
1523                         u16 fw_speed = link_info->force_link_speed;
1524                         u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1525
1526                         if (speed != SPEED_UNKNOWN)
1527                                 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1528                                             speed);
1529                 }
1530                 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
1531                 /* fall through */
1532         }
1533         case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1534                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1535                 break;
1536         case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
1537                 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
1538                 break;
1539         case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
1540                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1541                 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1542
1543                 if (BNXT_VF(bp))
1544                         break;
1545
1546                 if (bp->pf.port_id != port_id)
1547                         break;
1548
1549                 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1550                 break;
1551         }
1552         case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
1553                 if (BNXT_PF(bp))
1554                         goto async_event_process_exit;
1555                 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1556                 break;
1557         default:
1558                 netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
1559                            event_id);
1560                 goto async_event_process_exit;
1561         }
1562         schedule_work(&bp->sp_task);
1563 async_event_process_exit:
1564         return 0;
1565 }
1566
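/* Dispatch a HWRM-related completion: a DONE completion acknowledges
 * the outstanding sequence id, a forwarded VF request marks the VF in
 * bp->pf.vf_event_bmap and schedules sp_task, and an async event is
 * passed to bnxt_async_event_process().
 */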
1567 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1568 {
1569         u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1570         struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1571         struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1572                                 (struct hwrm_fwd_req_cmpl *)txcmp;
1573
1574         switch (cmpl_type) {
1575         case CMPL_BASE_TYPE_HWRM_DONE:
1576                 seq_id = le16_to_cpu(h_cmpl->sequence_id);
1577                 if (seq_id == bp->hwrm_intr_seq_id)
1578                         bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
1579                 else
1580                         netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1581                 break;
1582
1583         case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1584                 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1585
1586                 if ((vf_id < bp->pf.first_vf_id) ||
1587                     (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1588                         netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1589                                    vf_id);
1590                         return -EINVAL;
1591                 }
1592
1593                 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1594                 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1595                 schedule_work(&bp->sp_task);
1596                 break;
1597
1598         case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1599                 bnxt_async_event_process(bp,
1600                                          (struct hwrm_async_event_cmpl *)txcmp);
1601
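                /* fall through */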
1602         default:
1603                 break;
1604         }
1605
1606         return 0;
1607 }
1608
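/* MSI-X interrupt handler: prefetch the next completion ring entry to
 * be processed and schedule NAPI for this ring.
 */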
1609 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1610 {
1611         struct bnxt_napi *bnapi = dev_instance;
1612         struct bnxt *bp = bnapi->bp;
1613         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1614         u32 cons = RING_CMP(cpr->cp_raw_cons);
1615
1616         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1617         napi_schedule(&bnapi->napi);
1618         return IRQ_HANDLED;
1619 }
1620
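/* Return true if the next completion ring entry is valid, i.e. there is
 * work pending on this ring.
 */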
1621 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1622 {
1623         u32 raw_cons = cpr->cp_raw_cons;
1624         u16 cons = RING_CMP(raw_cons);
1625         struct tx_cmp *txcmp;
1626
1627         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1628
1629         return TX_CMP_VALID(txcmp, raw_cons);
1630 }
1631
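/* Legacy (INTx) interrupt handler.  Verify that the interrupt really
 * belongs to this ring, disable the ring IRQ and schedule NAPI unless
 * interrupts have been disabled via bp->intr_sem.
 */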
1632 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1633 {
1634         struct bnxt_napi *bnapi = dev_instance;
1635         struct bnxt *bp = bnapi->bp;
1636         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1637         u32 cons = RING_CMP(cpr->cp_raw_cons);
1638         u32 int_status;
1639
1640         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1641
1642         if (!bnxt_has_work(bp, cpr)) {
1643                 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
1644                 /* return if erroneous interrupt */
1645                 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1646                         return IRQ_NONE;
1647         }
1648
1649         /* disable ring IRQ */
1650         BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
1651
1652         /* Return here if interrupt is shared and is disabled. */
1653         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1654                 return IRQ_HANDLED;
1655
1656         napi_schedule(&bnapi->napi);
1657         return IRQ_HANDLED;
1658 }
1659
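/* Service the completion ring for one NAPI instance: process TX, RX and
 * HWRM completions up to the RX budget, acknowledge the completion ring
 * doorbell, free completed TX buffers and ring the RX/aggregation
 * doorbells if new buffers were posted.  Returns the number of RX
 * packets processed.
 */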
1660 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1661 {
1662         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1663         u32 raw_cons = cpr->cp_raw_cons;
1664         u32 cons;
1665         int tx_pkts = 0;
1666         int rx_pkts = 0;
1667         bool rx_event = false;
1668         bool agg_event = false;
1669         struct tx_cmp *txcmp;
1670
1671         while (1) {
1672                 int rc;
1673
1674                 cons = RING_CMP(raw_cons);
1675                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1676
1677                 if (!TX_CMP_VALID(txcmp, raw_cons))
1678                         break;
1679
1680                 /* The validity test of the entry must be done before
1681                  * reading any further.
1682                  */
1683                 dma_rmb();
1684                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1685                         tx_pkts++;
1686                         /* return full budget so NAPI will complete. */
1687                         if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
1688                                 rx_pkts = budget;
1689                                 raw_cons = NEXT_RAW_CMP(raw_cons);
1690                                 break;
1691                         }
1692                 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1693                         rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
1694                         if (likely(rc >= 0))
1695                                 rx_pkts += rc;
1696                         else if (rc == -EBUSY)  /* partial completion */
1697                                 break;
1698                         rx_event = true;
1699                 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1700                                      CMPL_BASE_TYPE_HWRM_DONE) ||
1701                                     (TX_CMP_TYPE(txcmp) ==
1702                                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1703                                     (TX_CMP_TYPE(txcmp) ==
1704                                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1705                         bnxt_hwrm_handler(bp, txcmp);
1706                 }
1707                 raw_cons = NEXT_RAW_CMP(raw_cons);
1708
1709                 if (rx_pkts && rx_pkts == budget)
1710                         break;
1711         }
1712
1713         cpr->cp_raw_cons = raw_cons;
1714         /* ACK completion ring before freeing tx ring and producing new
1715          * buffers in rx/agg rings to prevent overflowing the completion
1716          * ring.
1717          */
1718         BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1719
1720         if (tx_pkts)
1721                 bnxt_tx_int(bp, bnapi, tx_pkts);
1722
1723         if (rx_event) {
1724                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1725
1726                 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1727                 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1728                 if (agg_event) {
1729                         writel(DB_KEY_RX | rxr->rx_agg_prod,
1730                                rxr->rx_agg_doorbell);
1731                         writel(DB_KEY_RX | rxr->rx_agg_prod,
1732                                rxr->rx_agg_doorbell);
1733                 }
1734         }
1735         return rx_pkts;
1736 }
1737
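/* NAPI poll handler for the special Nitro A0 completion ring.  RX
 * completions here are only used to recycle the buffers, so a CRC error
 * is forced into each completion before it is handed to bnxt_rx_pkt().
 */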
1738 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
1739 {
1740         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1741         struct bnxt *bp = bnapi->bp;
1742         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1743         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1744         struct tx_cmp *txcmp;
1745         struct rx_cmp_ext *rxcmp1;
1746         u32 cp_cons, tmp_raw_cons;
1747         u32 raw_cons = cpr->cp_raw_cons;
1748         u32 rx_pkts = 0;
1749         bool agg_event = false;
1750
1751         while (1) {
1752                 int rc;
1753
1754                 cp_cons = RING_CMP(raw_cons);
1755                 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1756
1757                 if (!TX_CMP_VALID(txcmp, raw_cons))
1758                         break;
1759
1760                 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1761                         tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
1762                         cp_cons = RING_CMP(tmp_raw_cons);
1763                         rxcmp1 = (struct rx_cmp_ext *)
1764                           &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1765
1766                         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1767                                 break;
1768
1769                         /* force an error to recycle the buffer */
1770                         rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1771                                 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1772
1773                         rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
1774                         if (likely(rc == -EIO))
1775                                 rx_pkts++;
1776                         else if (rc == -EBUSY)  /* partial completion */
1777                                 break;
1778                 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
1779                                     CMPL_BASE_TYPE_HWRM_DONE)) {
1780                         bnxt_hwrm_handler(bp, txcmp);
1781                 } else {
1782                         netdev_err(bp->dev,
1783                                    "Invalid completion received on special ring\n");
1784                 }
1785                 raw_cons = NEXT_RAW_CMP(raw_cons);
1786
1787                 if (rx_pkts == budget)
1788                         break;
1789         }
1790
1791         cpr->cp_raw_cons = raw_cons;
1792         BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1793         writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1794         writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1795
1796         if (agg_event) {
1797                 writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
1798                 writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
1799         }
1800
1801         if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
1802                 napi_complete(napi);
1803                 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1804         }
1805         return rx_pkts;
1806 }
1807
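/* Main NAPI poll handler: run bnxt_poll_work() until the budget is
 * exhausted or no work remains, then complete NAPI and re-arm the
 * completion ring interrupt.
 */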
1808 static int bnxt_poll(struct napi_struct *napi, int budget)
1809 {
1810         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1811         struct bnxt *bp = bnapi->bp;
1812         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1813         int work_done = 0;
1814
1815         if (!bnxt_lock_napi(bnapi))
1816                 return budget;
1817
1818         while (1) {
1819                 work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
1820
1821                 if (work_done >= budget) {
1822                         if (!budget)
1823                                 BNXT_CP_DB_REARM(cpr->cp_doorbell,
1824                                                  cpr->cp_raw_cons);
1825                         break;
1826                 }
1827
1828                 if (!bnxt_has_work(bp, cpr)) {
1829                         napi_complete(napi);
1830                         BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1831                         break;
1832                 }
1833         }
1834         mmiowb();
1835         bnxt_unlock_napi(bnapi);
1836         return work_done;
1837 }
1838
1839 #ifdef CONFIG_NET_RX_BUSY_POLL
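/* Busy-poll handler (ndo_busy_poll): service the ring with a small
 * fixed budget while holding the per-ring poll lock, then re-arm the
 * completion ring doorbell.
 */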
1840 static int bnxt_busy_poll(struct napi_struct *napi)
1841 {
1842         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1843         struct bnxt *bp = bnapi->bp;
1844         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1845         int rx_work, budget = 4;
1846
1847         if (atomic_read(&bp->intr_sem) != 0)
1848                 return LL_FLUSH_FAILED;
1849
1850         if (!bp->link_info.link_up)
1851                 return LL_FLUSH_FAILED;
1852
1853         if (!bnxt_lock_poll(bnapi))
1854                 return LL_FLUSH_BUSY;
1855
1856         rx_work = bnxt_poll_work(bp, bnapi, budget);
1857
1858         BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1859
1860         bnxt_unlock_poll(bnapi);
1861         return rx_work;
1862 }
1863 #endif
1864
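/* Free all SKBs still attached to the TX rings and unmap their DMA
 * buffers.  Push packets consume two descriptors and have no separate
 * DMA mapping to release.
 */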
1865 static void bnxt_free_tx_skbs(struct bnxt *bp)
1866 {
1867         int i, max_idx;
1868         struct pci_dev *pdev = bp->pdev;
1869
1870         if (!bp->tx_ring)
1871                 return;
1872
1873         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
1874         for (i = 0; i < bp->tx_nr_rings; i++) {
1875                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
1876                 int j;
1877
1878                 for (j = 0; j < max_idx;) {
1879                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
1880                         struct sk_buff *skb = tx_buf->skb;
1881                         int k, last;
1882
1883                         if (!skb) {
1884                                 j++;
1885                                 continue;
1886                         }
1887
1888                         tx_buf->skb = NULL;
1889
1890                         if (tx_buf->is_push) {
1891                                 dev_kfree_skb(skb);
1892                                 j += 2;
1893                                 continue;
1894                         }
1895
1896                         dma_unmap_single(&pdev->dev,
1897                                          dma_unmap_addr(tx_buf, mapping),
1898                                          skb_headlen(skb),
1899                                          PCI_DMA_TODEVICE);
1900
1901                         last = tx_buf->nr_frags;
1902                         j += 2;
1903                         for (k = 0; k < last; k++, j++) {
1904                                 int ring_idx = j & bp->tx_ring_mask;
1905                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
1906
1907                                 tx_buf = &txr->tx_buf_ring[ring_idx];
1908                                 dma_unmap_page(
1909                                         &pdev->dev,
1910                                         dma_unmap_addr(tx_buf, mapping),
1911                                         skb_frag_size(frag), PCI_DMA_TODEVICE);
1912                         }
1913                         dev_kfree_skb(skb);
1914                 }
1915                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
1916         }
1917 }
1918
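/* Free all TPA, RX and aggregation ring buffers and unmap their DMA
 * mappings.
 */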
1919 static void bnxt_free_rx_skbs(struct bnxt *bp)
1920 {
1921         int i, max_idx, max_agg_idx;
1922         struct pci_dev *pdev = bp->pdev;
1923
1924         if (!bp->rx_ring)
1925                 return;
1926
1927         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
1928         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
1929         for (i = 0; i < bp->rx_nr_rings; i++) {
1930                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
1931                 int j;
1932
1933                 if (rxr->rx_tpa) {
1934                         for (j = 0; j < MAX_TPA; j++) {
1935                                 struct bnxt_tpa_info *tpa_info =
1936                                                         &rxr->rx_tpa[j];
1937                                 u8 *data = tpa_info->data;
1938
1939                                 if (!data)
1940                                         continue;
1941
1942                                 dma_unmap_single(
1943                                         &pdev->dev,
1944                                         dma_unmap_addr(tpa_info, mapping),
1945                                         bp->rx_buf_use_size,
1946                                         PCI_DMA_FROMDEVICE);
1947
1948                                 tpa_info->data = NULL;
1949
1950                                 kfree(data);
1951                         }
1952                 }
1953
1954                 for (j = 0; j < max_idx; j++) {
1955                         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
1956                         u8 *data = rx_buf->data;
1957
1958                         if (!data)
1959                                 continue;
1960
1961                         dma_unmap_single(&pdev->dev,
1962                                          dma_unmap_addr(rx_buf, mapping),
1963                                          bp->rx_buf_use_size,
1964                                          PCI_DMA_FROMDEVICE);
1965
1966                         rx_buf->data = NULL;
1967
1968                         kfree(data);
1969                 }
1970
1971                 for (j = 0; j < max_agg_idx; j++) {
1972                         struct bnxt_sw_rx_agg_bd *rx_agg_buf =
1973                                 &rxr->rx_agg_ring[j];
1974                         struct page *page = rx_agg_buf->page;
1975
1976                         if (!page)
1977                                 continue;
1978
1979                         dma_unmap_page(&pdev->dev,
1980                                        dma_unmap_addr(rx_agg_buf, mapping),
1981                                        BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
1982
1983                         rx_agg_buf->page = NULL;
1984                         __clear_bit(j, rxr->rx_agg_bmap);
1985
1986                         __free_page(page);
1987                 }
1988                 if (rxr->rx_page) {
1989                         __free_page(rxr->rx_page);
1990                         rxr->rx_page = NULL;
1991                 }
1992         }
1993 }
1994
1995 static void bnxt_free_skbs(struct bnxt *bp)
1996 {
1997         bnxt_free_tx_skbs(bp);
1998         bnxt_free_rx_skbs(bp);
1999 }
2000
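/* Free the descriptor pages, the optional page table and the software
 * ring area (vmem) backing a generic bnxt ring.
 */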
2001 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
2002 {
2003         struct pci_dev *pdev = bp->pdev;
2004         int i;
2005
2006         for (i = 0; i < ring->nr_pages; i++) {
2007                 if (!ring->pg_arr[i])
2008                         continue;
2009
2010                 dma_free_coherent(&pdev->dev, ring->page_size,
2011                                   ring->pg_arr[i], ring->dma_arr[i]);
2012
2013                 ring->pg_arr[i] = NULL;
2014         }
2015         if (ring->pg_tbl) {
2016                 dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
2017                                   ring->pg_tbl, ring->pg_tbl_map);
2018                 ring->pg_tbl = NULL;
2019         }
2020         if (ring->vmem_size && *ring->vmem) {
2021                 vfree(*ring->vmem);
2022                 *ring->vmem = NULL;
2023         }
2024 }
2025
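/* Allocate the descriptor pages for a generic bnxt ring, a page table
 * when the ring spans more than one page, and the software ring area
 * (vmem) when requested.
 */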
2026 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
2027 {
2028         int i;
2029         struct pci_dev *pdev = bp->pdev;
2030
2031         if (ring->nr_pages > 1) {
2032                 ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
2033                                                   ring->nr_pages * 8,
2034                                                   &ring->pg_tbl_map,
2035                                                   GFP_KERNEL);
2036                 if (!ring->pg_tbl)
2037                         return -ENOMEM;
2038         }
2039
2040         for (i = 0; i < ring->nr_pages; i++) {
2041                 ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2042                                                      ring->page_size,
2043                                                      &ring->dma_arr[i],
2044                                                      GFP_KERNEL);
2045                 if (!ring->pg_arr[i])
2046                         return -ENOMEM;
2047
2048                 if (ring->nr_pages > 1)
2049                         ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
2050         }
2051
2052         if (ring->vmem_size) {
2053                 *ring->vmem = vzalloc(ring->vmem_size);
2054                 if (!(*ring->vmem))
2055                         return -ENOMEM;
2056         }
2057         return 0;
2058 }
2059
2060 static void bnxt_free_rx_rings(struct bnxt *bp)
2061 {
2062         int i;
2063
2064         if (!bp->rx_ring)
2065                 return;
2066
2067         for (i = 0; i < bp->rx_nr_rings; i++) {
2068                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2069                 struct bnxt_ring_struct *ring;
2070
2071                 kfree(rxr->rx_tpa);
2072                 rxr->rx_tpa = NULL;
2073
2074                 kfree(rxr->rx_agg_bmap);
2075                 rxr->rx_agg_bmap = NULL;
2076
2077                 ring = &rxr->rx_ring_struct;
2078                 bnxt_free_ring(bp, ring);
2079
2080                 ring = &rxr->rx_agg_ring_struct;
2081                 bnxt_free_ring(bp, ring);
2082         }
2083 }
2084
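/* Allocate the RX ring and, when aggregation rings are enabled, the
 * aggregation ring, its buffer bitmap and the TPA info array for every
 * RX ring.
 */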
2085 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2086 {
2087         int i, rc, agg_rings = 0, tpa_rings = 0;
2088
2089         if (!bp->rx_ring)
2090                 return -ENOMEM;
2091
2092         if (bp->flags & BNXT_FLAG_AGG_RINGS)
2093                 agg_rings = 1;
2094
2095         if (bp->flags & BNXT_FLAG_TPA)
2096                 tpa_rings = 1;
2097
2098         for (i = 0; i < bp->rx_nr_rings; i++) {
2099                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2100                 struct bnxt_ring_struct *ring;
2101
2102                 ring = &rxr->rx_ring_struct;
2103
2104                 rc = bnxt_alloc_ring(bp, ring);
2105                 if (rc)
2106                         return rc;
2107
2108                 if (agg_rings) {
2109                         u16 mem_size;
2110
2111                         ring = &rxr->rx_agg_ring_struct;
2112                         rc = bnxt_alloc_ring(bp, ring);
2113                         if (rc)
2114                                 return rc;
2115
2116                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2117                         mem_size = rxr->rx_agg_bmap_size / 8;
2118                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2119                         if (!rxr->rx_agg_bmap)
2120                                 return -ENOMEM;
2121
2122                         if (tpa_rings) {
2123                                 rxr->rx_tpa = kcalloc(MAX_TPA,
2124                                                 sizeof(struct bnxt_tpa_info),
2125                                                 GFP_KERNEL);
2126                                 if (!rxr->rx_tpa)
2127                                         return -ENOMEM;
2128                         }
2129                 }
2130         }
2131         return 0;
2132 }
2133
2134 static void bnxt_free_tx_rings(struct bnxt *bp)
2135 {
2136         int i;
2137         struct pci_dev *pdev = bp->pdev;
2138
2139         if (!bp->tx_ring)
2140                 return;
2141
2142         for (i = 0; i < bp->tx_nr_rings; i++) {
2143                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2144                 struct bnxt_ring_struct *ring;
2145
2146                 if (txr->tx_push) {
2147                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
2148                                           txr->tx_push, txr->tx_push_mapping);
2149                         txr->tx_push = NULL;
2150                 }
2151
2152                 ring = &txr->tx_ring_struct;
2153
2154                 bnxt_free_ring(bp, ring);
2155         }
2156 }
2157
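/* Allocate the TX rings and, when TX push is enabled, one coherent DMA
 * buffer per ring to back the push operation.
 */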
2158 static int bnxt_alloc_tx_rings(struct bnxt *bp)
2159 {
2160         int i, j, rc;
2161         struct pci_dev *pdev = bp->pdev;
2162
2163         bp->tx_push_size = 0;
2164         if (bp->tx_push_thresh) {
2165                 int push_size;
2166
2167                 push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2168                                         bp->tx_push_thresh);
2169
2170                 if (push_size > 256) {
2171                         push_size = 0;
2172                         bp->tx_push_thresh = 0;
2173                 }
2174
2175                 bp->tx_push_size = push_size;
2176         }
2177
2178         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2179                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2180                 struct bnxt_ring_struct *ring;
2181
2182                 ring = &txr->tx_ring_struct;
2183
2184                 rc = bnxt_alloc_ring(bp, ring);
2185                 if (rc)
2186                         return rc;
2187
2188                 if (bp->tx_push_size) {
2189                         dma_addr_t mapping;
2190
2191                         /* One pre-allocated DMA buffer to back up
2192                          * the TX push operation.
2193                          */
2194                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
2195                                                 bp->tx_push_size,
2196                                                 &txr->tx_push_mapping,
2197                                                 GFP_KERNEL);
2198
2199                         if (!txr->tx_push)
2200                                 return -ENOMEM;
2201
2202                         mapping = txr->tx_push_mapping +
2203                                 sizeof(struct tx_push_bd);
2204                         txr->data_mapping = cpu_to_le64(mapping);
2205
2206                         memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
2207                 }
2208                 ring->queue_id = bp->q_info[j].queue_id;
2209                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2210                         j++;
2211         }
2212         return 0;
2213 }
2214
2215 static void bnxt_free_cp_rings(struct bnxt *bp)
2216 {
2217         int i;
2218
2219         if (!bp->bnapi)
2220                 return;
2221
2222         for (i = 0; i < bp->cp_nr_rings; i++) {
2223                 struct bnxt_napi *bnapi = bp->bnapi[i];
2224                 struct bnxt_cp_ring_info *cpr;
2225                 struct bnxt_ring_struct *ring;
2226
2227                 if (!bnapi)
2228                         continue;
2229
2230                 cpr = &bnapi->cp_ring;
2231                 ring = &cpr->cp_ring_struct;
2232
2233                 bnxt_free_ring(bp, ring);
2234         }
2235 }
2236
2237 static int bnxt_alloc_cp_rings(struct bnxt *bp)
2238 {
2239         int i, rc;
2240
2241         for (i = 0; i < bp->cp_nr_rings; i++) {
2242                 struct bnxt_napi *bnapi = bp->bnapi[i];
2243                 struct bnxt_cp_ring_info *cpr;
2244                 struct bnxt_ring_struct *ring;
2245
2246                 if (!bnapi)
2247                         continue;
2248
2249                 cpr = &bnapi->cp_ring;
2250                 ring = &cpr->cp_ring_struct;
2251
2252                 rc = bnxt_alloc_ring(bp, ring);
2253                 if (rc)
2254                         return rc;
2255         }
2256         return 0;
2257 }
2258
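/* Point each ring_struct at its descriptor pages, DMA addresses and
 * software ring area for the completion, RX, aggregation and TX rings.
 */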
2259 static void bnxt_init_ring_struct(struct bnxt *bp)
2260 {
2261         int i;
2262
2263         for (i = 0; i < bp->cp_nr_rings; i++) {
2264                 struct bnxt_napi *bnapi = bp->bnapi[i];
2265                 struct bnxt_cp_ring_info *cpr;
2266                 struct bnxt_rx_ring_info *rxr;
2267                 struct bnxt_tx_ring_info *txr;
2268                 struct bnxt_ring_struct *ring;
2269
2270                 if (!bnapi)
2271                         continue;
2272
2273                 cpr = &bnapi->cp_ring;
2274                 ring = &cpr->cp_ring_struct;
2275                 ring->nr_pages = bp->cp_nr_pages;
2276                 ring->page_size = HW_CMPD_RING_SIZE;
2277                 ring->pg_arr = (void **)cpr->cp_desc_ring;
2278                 ring->dma_arr = cpr->cp_desc_mapping;
2279                 ring->vmem_size = 0;
2280
2281                 rxr = bnapi->rx_ring;
2282                 if (!rxr)
2283                         goto skip_rx;
2284
2285                 ring = &rxr->rx_ring_struct;
2286                 ring->nr_pages = bp->rx_nr_pages;
2287                 ring->page_size = HW_RXBD_RING_SIZE;
2288                 ring->pg_arr = (void **)rxr->rx_desc_ring;
2289                 ring->dma_arr = rxr->rx_desc_mapping;
2290                 ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
2291                 ring->vmem = (void **)&rxr->rx_buf_ring;
2292
2293                 ring = &rxr->rx_agg_ring_struct;
2294                 ring->nr_pages = bp->rx_agg_nr_pages;
2295                 ring->page_size = HW_RXBD_RING_SIZE;
2296                 ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
2297                 ring->dma_arr = rxr->rx_agg_desc_mapping;
2298                 ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
2299                 ring->vmem = (void **)&rxr->rx_agg_ring;
2300
2301 skip_rx:
2302                 txr = bnapi->tx_ring;
2303                 if (!txr)
2304                         continue;
2305
2306                 ring = &txr->tx_ring_struct;
2307                 ring->nr_pages = bp->tx_nr_pages;
2308                 ring->page_size = HW_RXBD_RING_SIZE;
2309                 ring->pg_arr = (void **)txr->tx_desc_ring;
2310                 ring->dma_arr = txr->tx_desc_mapping;
2311                 ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
2312                 ring->vmem = (void **)&txr->tx_buf_ring;
2313         }
2314 }
2315
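/* Initialize every RX buffer descriptor on the ring with the given type
 * and a sequential opaque index.
 */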
2316 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
2317 {
2318         int i;
2319         u32 prod;
2320         struct rx_bd **rx_buf_ring;
2321
2322         rx_buf_ring = (struct rx_bd **)ring->pg_arr;
2323         for (i = 0, prod = 0; i < ring->nr_pages; i++) {
2324                 int j;
2325                 struct rx_bd *rxbd;
2326
2327                 rxbd = rx_buf_ring[i];
2328                 if (!rxbd)
2329                         continue;
2330
2331                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
2332                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
2333                         rxbd->rx_bd_opaque = prod;
2334                 }
2335         }
2336 }
2337
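/* Fill one RX ring, and its aggregation ring and TPA buffers when
 * enabled, with freshly allocated receive buffers.
 */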
2338 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2339 {
2340         struct net_device *dev = bp->dev;
2341         struct bnxt_rx_ring_info *rxr;
2342         struct bnxt_ring_struct *ring;
2343         u32 prod, type;
2344         int i;
2345
2346         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
2347                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
2348
2349         if (NET_IP_ALIGN == 2)
2350                 type |= RX_BD_FLAGS_SOP;
2351
2352         rxr = &bp->rx_ring[ring_nr];
2353         ring = &rxr->rx_ring_struct;
2354         bnxt_init_rxbd_pages(ring, type);
2355
2356         prod = rxr->rx_prod;
2357         for (i = 0; i < bp->rx_ring_size; i++) {
2358                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
2359                         netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
2360                                     ring_nr, i, bp->rx_ring_size);
2361                         break;
2362                 }
2363                 prod = NEXT_RX(prod);
2364         }
2365         rxr->rx_prod = prod;
2366         ring->fw_ring_id = INVALID_HW_RING_ID;
2367
2368         ring = &rxr->rx_agg_ring_struct;
2369         ring->fw_ring_id = INVALID_HW_RING_ID;
2370
2371         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
2372                 return 0;
2373
2374         type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
2375                 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
2376
2377         bnxt_init_rxbd_pages(ring, type);
2378
2379         prod = rxr->rx_agg_prod;
2380         for (i = 0; i < bp->rx_agg_ring_size; i++) {
2381                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
2382                         netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
2383                                     ring_nr, i, bp->rx_agg_ring_size);
2384                         break;
2385                 }
2386                 prod = NEXT_RX_AGG(prod);
2387         }
2388         rxr->rx_agg_prod = prod;
2389
2390         if (bp->flags & BNXT_FLAG_TPA) {
2391                 if (rxr->rx_tpa) {
2392                         u8 *data;
2393                         dma_addr_t mapping;
2394
2395                         for (i = 0; i < MAX_TPA; i++) {
2396                                 data = __bnxt_alloc_rx_data(bp, &mapping,
2397                                                             GFP_KERNEL);
2398                                 if (!data)
2399                                         return -ENOMEM;
2400
2401                                 rxr->rx_tpa[i].data = data;
2402                                 rxr->rx_tpa[i].mapping = mapping;
2403                         }
2404                 } else {
2405                         netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2406                         return -ENOMEM;
2407                 }
2408         }
2409
2410         return 0;
2411 }
2412
2413 static void bnxt_init_cp_rings(struct bnxt *bp)
2414 {
2415         int i;
2416
2417         for (i = 0; i < bp->cp_nr_rings; i++) {
2418                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
2419                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
2420
2421                 ring->fw_ring_id = INVALID_HW_RING_ID;
2422         }
2423 }
2424
2425 static int bnxt_init_rx_rings(struct bnxt *bp)
2426 {
2427         int i, rc = 0;
2428
2429         for (i = 0; i < bp->rx_nr_rings; i++) {
2430                 rc = bnxt_init_one_rx_ring(bp, i);
2431                 if (rc)
2432                         break;
2433         }
2434
2435         return rc;
2436 }
2437
2438 static int bnxt_init_tx_rings(struct bnxt *bp)
2439 {
2440         u16 i;
2441
2442         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2443                                    MAX_SKB_FRAGS + 1);
2444
2445         for (i = 0; i < bp->tx_nr_rings; i++) {
2446                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2447                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2448
2449                 ring->fw_ring_id = INVALID_HW_RING_ID;
2450         }
2451
2452         return 0;
2453 }
2454
2455 static void bnxt_free_ring_grps(struct bnxt *bp)
2456 {
2457         kfree(bp->grp_info);
2458         bp->grp_info = NULL;
2459 }
2460
2461 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2462 {
2463         int i;
2464
2465         if (irq_re_init) {
2466                 bp->grp_info = kcalloc(bp->cp_nr_rings,
2467                                        sizeof(struct bnxt_ring_grp_info),
2468                                        GFP_KERNEL);
2469                 if (!bp->grp_info)
2470                         return -ENOMEM;
2471         }
2472         for (i = 0; i < bp->cp_nr_rings; i++) {
2473                 if (irq_re_init)
2474                         bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2475                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2476                 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2477                 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2478                 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2479         }
2480         return 0;
2481 }
2482
2483 static void bnxt_free_vnics(struct bnxt *bp)
2484 {
2485         kfree(bp->vnic_info);
2486         bp->vnic_info = NULL;
2487         bp->nr_vnics = 0;
2488 }
2489
2490 static int bnxt_alloc_vnics(struct bnxt *bp)
2491 {
2492         int num_vnics = 1;
2493
2494 #ifdef CONFIG_RFS_ACCEL
2495         if (bp->flags & BNXT_FLAG_RFS)
2496                 num_vnics += bp->rx_nr_rings;
2497 #endif
2498
2499         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
2500                 num_vnics++;
2501
2502         bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
2503                                 GFP_KERNEL);
2504         if (!bp->vnic_info)
2505                 return -ENOMEM;
2506
2507         bp->nr_vnics = num_vnics;
2508         return 0;
2509 }
2510
2511 static void bnxt_init_vnics(struct bnxt *bp)
2512 {
2513         int i;
2514
2515         for (i = 0; i < bp->nr_vnics; i++) {
2516                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2517
2518                 vnic->fw_vnic_id = INVALID_HW_RING_ID;
2519                 vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
2520                 vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
2521                 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
2522
2523                 if (bp->vnic_info[i].rss_hash_key) {
2524                         if (i == 0)
2525                                 prandom_bytes(vnic->rss_hash_key,
2526                                               HW_HASH_KEY_SIZE);
2527                         else
2528                                 memcpy(vnic->rss_hash_key,
2529                                        bp->vnic_info[0].rss_hash_key,
2530                                        HW_HASH_KEY_SIZE);
2531                 }
2532         }
2533 }
2534
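/* Return the number of descriptor pages to use for a ring of
 * @ring_size entries, rounded up to a power of two.  For example,
 * ring_size = 1023 with desc_per_pg = 256 gives 1023 / 256 = 3,
 * incremented to 4, which is already a power of two.
 */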
2535 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
2536 {
2537         int pages;
2538
2539         pages = ring_size / desc_per_pg;
2540
2541         if (!pages)
2542                 return 1;
2543
2544         pages++;
2545
2546         while (pages & (pages - 1))
2547                 pages++;
2548
2549         return pages;
2550 }
2551
2552 static void bnxt_set_tpa_flags(struct bnxt *bp)
2553 {
2554         bp->flags &= ~BNXT_FLAG_TPA;
2555         if (bp->dev->features & NETIF_F_LRO)
2556                 bp->flags |= BNXT_FLAG_LRO;
2557         if (bp->dev->features & NETIF_F_GRO)
2558                 bp->flags |= BNXT_FLAG_GRO;
2559 }
2560
2561 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
2562  * be set on entry.
2563  */
2564 void bnxt_set_ring_params(struct bnxt *bp)
2565 {
2566         u32 ring_size, rx_size, rx_space;
2567         u32 agg_factor = 0, agg_ring_size = 0;
2568
2569         /* 8 for CRC and VLAN */
2570         rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
2571
2572         rx_space = rx_size + NET_SKB_PAD +
2573                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2574
2575         bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
2576         ring_size = bp->rx_ring_size;
2577         bp->rx_agg_ring_size = 0;
2578         bp->rx_agg_nr_pages = 0;
2579
2580         if (bp->flags & BNXT_FLAG_TPA)
2581                 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
2582
2583         bp->flags &= ~BNXT_FLAG_JUMBO;
2584         if (rx_space > PAGE_SIZE) {
2585                 u32 jumbo_factor;
2586
2587                 bp->flags |= BNXT_FLAG_JUMBO;
2588                 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
2589                 if (jumbo_factor > agg_factor)
2590                         agg_factor = jumbo_factor;
2591         }
2592         agg_ring_size = ring_size * agg_factor;
2593
2594         if (agg_ring_size) {
2595                 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
2596                                                         RX_DESC_CNT);
2597                 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
2598                         u32 tmp = agg_ring_size;
2599
2600                         bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
2601                         agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
2602                         netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
2603                                     tmp, agg_ring_size);
2604                 }
2605                 bp->rx_agg_ring_size = agg_ring_size;
2606                 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
2607                 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
2608                 rx_space = rx_size + NET_SKB_PAD +
2609                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2610         }
2611
2612         bp->rx_buf_use_size = rx_size;
2613         bp->rx_buf_size = rx_space;
2614
2615         bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
2616         bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
2617
2618         ring_size = bp->tx_ring_size;
2619         bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
2620         bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
2621
2622         ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
2623         bp->cp_ring_size = ring_size;
2624
2625         bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
2626         if (bp->cp_nr_pages > MAX_CP_PAGES) {
2627                 bp->cp_nr_pages = MAX_CP_PAGES;
2628                 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
2629                 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
2630                             ring_size, bp->cp_ring_size);
2631         }
2632         bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
2633         bp->cp_ring_mask = bp->cp_bit - 1;
2634 }
2635
2636 static void bnxt_free_vnic_attributes(struct bnxt *bp)
2637 {
2638         int i;
2639         struct bnxt_vnic_info *vnic;
2640         struct pci_dev *pdev = bp->pdev;
2641
2642         if (!bp->vnic_info)
2643                 return;
2644
2645         for (i = 0; i < bp->nr_vnics; i++) {
2646                 vnic = &bp->vnic_info[i];
2647
2648                 kfree(vnic->fw_grp_ids);
2649                 vnic->fw_grp_ids = NULL;
2650
2651                 kfree(vnic->uc_list);
2652                 vnic->uc_list = NULL;
2653
2654                 if (vnic->mc_list) {
2655                         dma_free_coherent(&pdev->dev, vnic->mc_list_size,
2656                                           vnic->mc_list, vnic->mc_list_mapping);
2657                         vnic->mc_list = NULL;
2658                 }
2659
2660                 if (vnic->rss_table) {
2661                         dma_free_coherent(&pdev->dev, PAGE_SIZE,
2662                                           vnic->rss_table,
2663                                           vnic->rss_table_dma_addr);
2664                         vnic->rss_table = NULL;
2665                 }
2666
2667                 vnic->rss_hash_key = NULL;
2668                 vnic->flags = 0;
2669         }
2670 }
2671
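/* Allocate the per-VNIC unicast and multicast address lists, the ring
 * group id array, and a DMA page shared by the RSS indirection table
 * and the RSS hash key.
 */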
2672 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
2673 {
2674         int i, rc = 0, size;
2675         struct bnxt_vnic_info *vnic;
2676         struct pci_dev *pdev = bp->pdev;
2677         int max_rings;
2678
2679         for (i = 0; i < bp->nr_vnics; i++) {
2680                 vnic = &bp->vnic_info[i];
2681
2682                 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
2683                         int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
2684
2685                         if (mem_size > 0) {
2686                                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
2687                                 if (!vnic->uc_list) {
2688                                         rc = -ENOMEM;
2689                                         goto out;
2690                                 }
2691                         }
2692                 }
2693
2694                 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
2695                         vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
2696                         vnic->mc_list =
2697                                 dma_alloc_coherent(&pdev->dev,
2698                                                    vnic->mc_list_size,
2699                                                    &vnic->mc_list_mapping,
2700                                                    GFP_KERNEL);
2701                         if (!vnic->mc_list) {
2702                                 rc = -ENOMEM;
2703                                 goto out;
2704                         }
2705                 }
2706
2707                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
2708                         max_rings = bp->rx_nr_rings;
2709                 else
2710                         max_rings = 1;
2711
2712                 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
2713                 if (!vnic->fw_grp_ids) {
2714                         rc = -ENOMEM;
2715                         goto out;
2716                 }
2717
2718                 /* Allocate rss table and hash key */
2719                 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2720                                                      &vnic->rss_table_dma_addr,
2721                                                      GFP_KERNEL);
2722                 if (!vnic->rss_table) {
2723                         rc = -ENOMEM;
2724                         goto out;
2725                 }
2726
2727                 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
2728
2729                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
2730                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
2731         }
2732         return 0;
2733
2734 out:
2735         return rc;
2736 }
2737
2738 static void bnxt_free_hwrm_resources(struct bnxt *bp)
2739 {
2740         struct pci_dev *pdev = bp->pdev;
2741
2742         dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
2743                           bp->hwrm_cmd_resp_dma_addr);
2744
2745         bp->hwrm_cmd_resp_addr = NULL;
2746         if (bp->hwrm_dbg_resp_addr) {
2747                 dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
2748                                   bp->hwrm_dbg_resp_addr,
2749                                   bp->hwrm_dbg_resp_dma_addr);
2750
2751                 bp->hwrm_dbg_resp_addr = NULL;
2752         }
2753 }
2754
2755 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2756 {
2757         struct pci_dev *pdev = bp->pdev;
2758
2759         bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2760                                                    &bp->hwrm_cmd_resp_dma_addr,
2761                                                    GFP_KERNEL);
2762         if (!bp->hwrm_cmd_resp_addr)
2763                 return -ENOMEM;
2764         bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
2765                                                     HWRM_DBG_REG_BUF_SIZE,
2766                                                     &bp->hwrm_dbg_resp_dma_addr,
2767                                                     GFP_KERNEL);
2768         if (!bp->hwrm_dbg_resp_addr)
2769                 netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
2770
2771         return 0;
2772 }
2773
2774 static void bnxt_free_stats(struct bnxt *bp)
2775 {
2776         u32 size, i;
2777         struct pci_dev *pdev = bp->pdev;
2778
2779         if (bp->hw_rx_port_stats) {
2780                 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
2781                                   bp->hw_rx_port_stats,
2782                                   bp->hw_rx_port_stats_map);
2783                 bp->hw_rx_port_stats = NULL;
2784                 bp->flags &= ~BNXT_FLAG_PORT_STATS;
2785         }
2786
2787         if (!bp->bnapi)
2788                 return;
2789
2790         size = sizeof(struct ctx_hw_stats);
2791
2792         for (i = 0; i < bp->cp_nr_rings; i++) {
2793                 struct bnxt_napi *bnapi = bp->bnapi[i];
2794                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2795
2796                 if (cpr->hw_stats) {
2797                         dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
2798                                           cpr->hw_stats_map);
2799                         cpr->hw_stats = NULL;
2800                 }
2801         }
2802 }
2803
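/* Allocate a hardware stats block for every completion ring and, on the
 * PF of chips other than the BCM58700, a coherent buffer for the RX and
 * TX port statistics.
 */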
2804 static int bnxt_alloc_stats(struct bnxt *bp)
2805 {
2806         u32 size, i;
2807         struct pci_dev *pdev = bp->pdev;
2808
2809         size = sizeof(struct ctx_hw_stats);
2810
2811         for (i = 0; i < bp->cp_nr_rings; i++) {
2812                 struct bnxt_napi *bnapi = bp->bnapi[i];
2813                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2814
2815                 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
2816                                                    &cpr->hw_stats_map,
2817                                                    GFP_KERNEL);
2818                 if (!cpr->hw_stats)
2819                         return -ENOMEM;
2820
2821                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
2822         }
2823
2824         if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
2825                 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
2826                                          sizeof(struct tx_port_stats) + 1024;
2827
2828                 bp->hw_rx_port_stats =
2829                         dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
2830                                            &bp->hw_rx_port_stats_map,
2831                                            GFP_KERNEL);
2832                 if (!bp->hw_rx_port_stats)
2833                         return -ENOMEM;
2834
2835                 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
2836                                        512;
2837                 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
2838                                            sizeof(struct rx_port_stats) + 512;
2839                 bp->flags |= BNXT_FLAG_PORT_STATS;
2840         }
2841         return 0;
2842 }
2843
2844 static void bnxt_clear_ring_indices(struct bnxt *bp)
2845 {
2846         int i;
2847
2848         if (!bp->bnapi)
2849                 return;
2850
2851         for (i = 0; i < bp->cp_nr_rings; i++) {
2852                 struct bnxt_napi *bnapi = bp->bnapi[i];
2853                 struct bnxt_cp_ring_info *cpr;
2854                 struct bnxt_rx_ring_info *rxr;
2855                 struct bnxt_tx_ring_info *txr;
2856
2857                 if (!bnapi)
2858                         continue;
2859
2860                 cpr = &bnapi->cp_ring;
2861                 cpr->cp_raw_cons = 0;
2862
2863                 txr = bnapi->tx_ring;
2864                 if (txr) {
2865                         txr->tx_prod = 0;
2866                         txr->tx_cons = 0;
2867                 }
2868
2869                 rxr = bnapi->rx_ring;
2870                 if (rxr) {
2871                         rxr->rx_prod = 0;
2872                         rxr->rx_agg_prod = 0;
2873                         rxr->rx_sw_agg_prod = 0;
2874                         rxr->rx_next_cons = 0;
2875                 }
2876         }
2877 }
2878
2879 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
2880 {
2881 #ifdef CONFIG_RFS_ACCEL
2882         int i;
2883
2884         /* We are under rtnl_lock and all our NAPIs have been disabled,
2885          * so it is safe to delete the hash table.
2886          */
2887         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
2888                 struct hlist_head *head;
2889                 struct hlist_node *tmp;
2890                 struct bnxt_ntuple_filter *fltr;
2891
2892                 head = &bp->ntp_fltr_hash_tbl[i];
2893                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
2894                         hlist_del(&fltr->hash);
2895                         kfree(fltr);
2896                 }
2897         }
2898         if (irq_reinit) {
2899                 kfree(bp->ntp_fltr_bmap);
2900                 bp->ntp_fltr_bmap = NULL;
2901         }
2902         bp->ntp_fltr_count = 0;
2903 #endif
2904 }
2905
2906 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
2907 {
2908 #ifdef CONFIG_RFS_ACCEL
2909         int i, rc = 0;
2910
2911         if (!(bp->flags & BNXT_FLAG_RFS))
2912                 return 0;
2913
2914         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
2915                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
2916
2917         bp->ntp_fltr_count = 0;
2918         bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
2919                                     sizeof(long),
2920                                     GFP_KERNEL);
2921
2922         if (!bp->ntp_fltr_bmap)
2923                 rc = -ENOMEM;
2924
2925         return rc;
2926 #else
2927         return 0;
2928 #endif
2929 }
2930
2931 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
2932 {
2933         bnxt_free_vnic_attributes(bp);
2934         bnxt_free_tx_rings(bp);
2935         bnxt_free_rx_rings(bp);
2936         bnxt_free_cp_rings(bp);
2937         bnxt_free_ntp_fltrs(bp, irq_re_init);
2938         if (irq_re_init) {
2939                 bnxt_free_stats(bp);
2940                 bnxt_free_ring_grps(bp);
2941                 bnxt_free_vnics(bp);
2942                 kfree(bp->tx_ring);
2943                 bp->tx_ring = NULL;
2944                 kfree(bp->rx_ring);
2945                 bp->rx_ring = NULL;
2946                 kfree(bp->bnapi);
2947                 bp->bnapi = NULL;
2948         } else {
2949                 bnxt_clear_ring_indices(bp);
2950         }
2951 }
2952
2953 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
2954 {
2955         int i, j, rc, size, arr_size;
2956         void *bnapi;
2957
2958         if (irq_re_init) {
2959                 /* Allocate bnapi mem pointer array and mem block for
2960                  * all queues
2961                  */
2962                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
2963                                 bp->cp_nr_rings);
2964                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
2965                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
2966                 if (!bnapi)
2967                         return -ENOMEM;
2968
2969                 bp->bnapi = bnapi;
2970                 bnapi += arr_size;
2971                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
2972                         bp->bnapi[i] = bnapi;
2973                         bp->bnapi[i]->index = i;
2974                         bp->bnapi[i]->bp = bp;
2975                 }
2976
2977                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
2978                                       sizeof(struct bnxt_rx_ring_info),
2979                                       GFP_KERNEL);
2980                 if (!bp->rx_ring)
2981                         return -ENOMEM;
2982
2983                 for (i = 0; i < bp->rx_nr_rings; i++) {
2984                         bp->rx_ring[i].bnapi = bp->bnapi[i];
2985                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
2986                 }
2987
2988                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
2989                                       sizeof(struct bnxt_tx_ring_info),
2990                                       GFP_KERNEL);
2991                 if (!bp->tx_ring)
2992                         return -ENOMEM;
2993
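                /* With shared rings, TX ring i shares the same bnapi (and
                 * completion ring) as RX ring i; otherwise the TX rings use
                 * the bnapi entries that follow the RX rings.
                 */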
2994                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
2995                         j = 0;
2996                 else
2997                         j = bp->rx_nr_rings;
2998
2999                 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
3000                         bp->tx_ring[i].bnapi = bp->bnapi[j];
3001                         bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
3002                 }
3003
3004                 rc = bnxt_alloc_stats(bp);
3005                 if (rc)
3006                         goto alloc_mem_err;
3007
3008                 rc = bnxt_alloc_ntp_fltrs(bp);
3009                 if (rc)
3010                         goto alloc_mem_err;
3011
3012                 rc = bnxt_alloc_vnics(bp);
3013                 if (rc)
3014                         goto alloc_mem_err;
3015         }
3016
3017         bnxt_init_ring_struct(bp);
3018
3019         rc = bnxt_alloc_rx_rings(bp);
3020         if (rc)
3021                 goto alloc_mem_err;
3022
3023         rc = bnxt_alloc_tx_rings(bp);
3024         if (rc)
3025                 goto alloc_mem_err;
3026
3027         rc = bnxt_alloc_cp_rings(bp);
3028         if (rc)
3029                 goto alloc_mem_err;
3030
3031         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
3032                                   BNXT_VNIC_UCAST_FLAG;
3033         rc = bnxt_alloc_vnic_attributes(bp);
3034         if (rc)
3035                 goto alloc_mem_err;
3036         return 0;
3037
3038 alloc_mem_err:
3039         bnxt_free_mem(bp, true);
3040         return rc;
3041 }
3042
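/* Fill in the common HWRM request header: request type, target completion
 * ring, target function ID, and the DMA address where firmware writes the
 * response.
 */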
3043 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
3044                             u16 cmpl_ring, u16 target_id)
3045 {
3046         struct input *req = request;
3047
3048         req->req_type = cpu_to_le16(req_type);
3049         req->cmpl_ring = cpu_to_le16(cmpl_ring);
3050         req->target_id = cpu_to_le16(target_id);
3051         req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
3052 }
3053
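/* Send one HWRM command and wait for its response: copy the request into
 * the BAR0 communication window, ring the doorbell, then either wait for
 * the completion interrupt (when a completion ring is specified) or poll
 * the response buffer for a non-zero length and the final valid bit.
 */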
3054 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3055                                  int timeout, bool silent)
3056 {
3057         int i, intr_process, rc, tmo_count;
3058         struct input *req = msg;
3059         u32 *data = msg;
3060         __le32 *resp_len, *valid;
3061         u16 cp_ring_id, len = 0;
3062         struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
3063
3064         req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
3065         memset(resp, 0, PAGE_SIZE);
3066         cp_ring_id = le16_to_cpu(req->cmpl_ring);
3067         intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
3068
3069         /* Write request msg to hwrm channel */
3070         __iowrite32_copy(bp->bar0, data, msg_len / 4);
3071
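        /* Zero the rest of the request window so that stale words from a
         * previous, longer command are not seen by firmware.
         */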
3072         for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
3073                 writel(0, bp->bar0 + i);
3074
3075         /* currently supports only one outstanding message */
3076         if (intr_process)
3077                 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
3078
3079         /* Ring channel doorbell */
3080         writel(1, bp->bar0 + 0x100);
3081
3082         if (!timeout)
3083                 timeout = DFLT_HWRM_CMD_TIMEOUT;
3084
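        /* timeout is in msec; each poll below sleeps roughly 25 usec, so
         * allow up to 40 polls per msec.
         */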
3085         i = 0;
3086         tmo_count = timeout * 40;
3087         if (intr_process) {
3088                 /* Wait until hwrm response cmpl interrupt is processed */
3089                 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
3090                        i++ < tmo_count) {
3091                         usleep_range(25, 40);
3092                 }
3093
3094                 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
3095                         netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
3096                                    le16_to_cpu(req->req_type));
3097                         return -1;
3098                 }
3099         } else {
3100                 /* Check if response len is updated */
3101                 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
3102                 for (i = 0; i < tmo_count; i++) {
3103                         len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3104                               HWRM_RESP_LEN_SFT;
3105                         if (len)
3106                                 break;
3107                         usleep_range(25, 40);
3108                 }
3109
3110                 if (i >= tmo_count) {
3111                         netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
3112                                    timeout, le16_to_cpu(req->req_type),
3113                                    le16_to_cpu(req->seq_id), len);
3114                         return -1;
3115                 }
3116
3117                 /* Last word of resp contains valid bit */
3118                 valid = bp->hwrm_cmd_resp_addr + len - 4;
3119                 for (i = 0; i < 5; i++) {
3120                         if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
3121                                 break;
3122                         udelay(1);
3123                 }
3124
3125                 if (i >= 5) {
3126                         netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
3127                                    timeout, le16_to_cpu(req->req_type),
3128                                    le16_to_cpu(req->seq_id), len, *valid);
3129                         return -1;
3130                 }
3131         }
3132
3133         rc = le16_to_cpu(resp->error_code);
3134         if (rc && !silent)
3135                 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
3136                            le16_to_cpu(resp->req_type),
3137                            le16_to_cpu(resp->seq_id), rc);
3138         return rc;
3139 }
3140
3141 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3142 {
3143         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
3144 }
3145
3146 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3147 {
3148         int rc;
3149
3150         mutex_lock(&bp->hwrm_cmd_lock);
3151         rc = _hwrm_send_message(bp, msg, msg_len, timeout);
3152         mutex_unlock(&bp->hwrm_cmd_lock);
3153         return rc;
3154 }
3155
3156 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3157                              int timeout)
3158 {
3159         int rc;
3160
3161         mutex_lock(&bp->hwrm_cmd_lock);
3162         rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3163         mutex_unlock(&bp->hwrm_cmd_lock);
3164         return rc;
3165 }
3166
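/* Register the driver with firmware: advertise the OS type and driver
 * version, the async events we want forwarded to us, and, on the PF, the
 * VF HWRM requests that firmware should forward to the PF driver.
 */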
3167 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
3168 {
3169         struct hwrm_func_drv_rgtr_input req = {0};
3170         int i;
3171         DECLARE_BITMAP(async_events_bmap, 256);
3172         u32 *events = (u32 *)async_events_bmap;
3173
3174         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3175
3176         req.enables =
3177                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
3178                             FUNC_DRV_RGTR_REQ_ENABLES_VER |
3179                             FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
3180
3181         memset(async_events_bmap, 0, sizeof(async_events_bmap));
3182         for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
3183                 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
3184
3185         for (i = 0; i < 8; i++)
3186                 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
3187
3188         req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
3189         req.ver_maj = DRV_VER_MAJ;
3190         req.ver_min = DRV_VER_MIN;
3191         req.ver_upd = DRV_VER_UPD;
3192
3193         if (BNXT_PF(bp)) {
3194                 DECLARE_BITMAP(vf_req_snif_bmap, 256);
3195                 u32 *data = (u32 *)vf_req_snif_bmap;
3196
3197                 memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
3198                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
3199                         __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
3200
3201                 for (i = 0; i < 8; i++)
3202                         req.vf_req_fwd[i] = cpu_to_le32(data[i]);
3203
3204                 req.enables |=
3205                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
3206         }
3207
3208         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3209 }
3210
3211 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
3212 {
3213         struct hwrm_func_drv_unrgtr_input req = {0};
3214
3215         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
3216         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3217 }
3218
3219 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
3220 {
3221         u32 rc = 0;
3222         struct hwrm_tunnel_dst_port_free_input req = {0};
3223
3224         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
3225         req.tunnel_type = tunnel_type;
3226
3227         switch (tunnel_type) {
3228         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
3229                 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
3230                 break;
3231         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
3232                 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
3233                 break;
3234         default:
3235                 break;
3236         }
3237
3238         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3239         if (rc)
3240                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
3241                            rc);
3242         return rc;
3243 }
3244
3245 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
3246                                            u8 tunnel_type)
3247 {
3248         u32 rc = 0;
3249         struct hwrm_tunnel_dst_port_alloc_input req = {0};
3250         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3251
3252         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
3253
3254         req.tunnel_type = tunnel_type;
3255         req.tunnel_dst_port_val = port;
3256
3257         mutex_lock(&bp->hwrm_cmd_lock);
3258         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3259         if (rc) {
3260                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
3261                            rc);
3262                 goto err_out;
3263         }
3264
3265         switch (tunnel_type) {
3266         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
3267                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
3268                 break;
3269         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
3270                 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
3271                 break;
3272         default:
3273                 break;
3274         }
3275
3276 err_out:
3277         mutex_unlock(&bp->hwrm_cmd_lock);
3278         return rc;
3279 }
3280
3281 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
3282 {
3283         struct hwrm_cfa_l2_set_rx_mask_input req = {0};
3284         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3285
3286         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
3287         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3288
3289         req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
3290         req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
3291         req.mask = cpu_to_le32(vnic->rx_mask);
3292         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3293 }
3294
3295 #ifdef CONFIG_RFS_ACCEL
3296 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
3297                                             struct bnxt_ntuple_filter *fltr)
3298 {
3299         struct hwrm_cfa_ntuple_filter_free_input req = {0};
3300
3301         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
3302         req.ntuple_filter_id = fltr->filter_id;
3303         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3304 }
3305
3306 #define BNXT_NTP_FLTR_FLAGS                                     \
3307         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
3308          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
3309          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
3310          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
3311          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
3312          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
3313          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
3314          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
3315          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
3316          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
3317          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
3318          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
3319          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
3320          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
3321
3322 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
3323                                              struct bnxt_ntuple_filter *fltr)
3324 {
3325         int rc = 0;
3326         struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
3327         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3328                 bp->hwrm_cmd_resp_addr;
3329         struct flow_keys *keys = &fltr->fkeys;
3330         struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
3331
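        /* VNIC 0 is the default VNIC; the RFS steering VNICs start at
         * index 1, one per RX queue, hence the fltr->rxq + 1 lookup above.
         */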
3332         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
3333         req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
3334
3335         req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
3336
3337         req.ethertype = htons(ETH_P_IP);
3338         memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
3339         req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
3340         req.ip_protocol = keys->basic.ip_proto;
3341
3342         req.src_ipaddr[0] = keys->addrs.v4addrs.src;
3343         req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3344         req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
3345         req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3346
3347         req.src_port = keys->ports.src;
3348         req.src_port_mask = cpu_to_be16(0xffff);
3349         req.dst_port = keys->ports.dst;
3350         req.dst_port_mask = cpu_to_be16(0xffff);
3351
3352         req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
3353         mutex_lock(&bp->hwrm_cmd_lock);
3354         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3355         if (!rc)
3356                 fltr->filter_id = resp->ntuple_filter_id;
3357         mutex_unlock(&bp->hwrm_cmd_lock);
3358         return rc;
3359 }
3360 #endif
3361
3362 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
3363                                      u8 *mac_addr)
3364 {
3365         u32 rc = 0;
3366         struct hwrm_cfa_l2_filter_alloc_input req = {0};
3367         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3368
3369         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
3370         req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
3371         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
3372                 req.flags |=
3373                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
3374         req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
3375         req.enables =
3376                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
3377                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
3378                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
3379         memcpy(req.l2_addr, mac_addr, ETH_ALEN);
3380         req.l2_addr_mask[0] = 0xff;
3381         req.l2_addr_mask[1] = 0xff;
3382         req.l2_addr_mask[2] = 0xff;
3383         req.l2_addr_mask[3] = 0xff;
3384         req.l2_addr_mask[4] = 0xff;
3385         req.l2_addr_mask[5] = 0xff;
3386
3387         mutex_lock(&bp->hwrm_cmd_lock);
3388         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3389         if (!rc)
3390                 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
3391                                                         resp->l2_filter_id;
3392         mutex_unlock(&bp->hwrm_cmd_lock);
3393         return rc;
3394 }
3395
3396 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
3397 {
3398         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
3399         int rc = 0;
3400
3401         /* Any associated ntuple filters will also be cleared by firmware. */
3402         mutex_lock(&bp->hwrm_cmd_lock);
3403         for (i = 0; i < num_of_vnics; i++) {
3404                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3405
3406                 for (j = 0; j < vnic->uc_filter_count; j++) {
3407                         struct hwrm_cfa_l2_filter_free_input req = {0};
3408
3409                         bnxt_hwrm_cmd_hdr_init(bp, &req,
3410                                                HWRM_CFA_L2_FILTER_FREE, -1, -1);
3411
3412                         req.l2_filter_id = vnic->fw_l2_filter_id[j];
3413
3414                         rc = _hwrm_send_message(bp, &req, sizeof(req),
3415                                                 HWRM_CMD_TIMEOUT);
3416                 }
3417                 vnic->uc_filter_count = 0;
3418         }
3419         mutex_unlock(&bp->hwrm_cmd_lock);
3420
3421         return rc;
3422 }
3423
3424 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
3425 {
3426         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3427         struct hwrm_vnic_tpa_cfg_input req = {0};
3428
3429         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
3430                 return 0;
3431
3432         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
3433
3434         if (tpa_flags) {
3435                 u16 mss = bp->dev->mtu - 40;
3436                 u32 nsegs, n, segs = 0, flags;
3437
3438                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
3439                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
3440                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
3441                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
3442                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
3443                 if (tpa_flags & BNXT_FLAG_GRO)
3444                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
3445
3446                 req.flags = cpu_to_le32(flags);
3447
3448                 req.enables =
3449                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
3450                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
3451                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
3452
3453                 /* The number of segs is expressed in log2 units, and the
3454                  * first packet is not included in this count.
3455                  */
3456                 if (mss <= BNXT_RX_PAGE_SIZE) {
3457                         n = BNXT_RX_PAGE_SIZE / mss;
3458                         nsegs = (MAX_SKB_FRAGS - 1) * n;
3459                 } else {
3460                         n = mss / BNXT_RX_PAGE_SIZE;
3461                         if (mss & (BNXT_RX_PAGE_SIZE - 1))
3462                                 n++;
3463                         nsegs = (MAX_SKB_FRAGS - n) / n;
3464                 }
3465
3466                 segs = ilog2(nsegs);
3467                 req.max_agg_segs = cpu_to_le16(segs);
3468                 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
3469
3470                 req.min_agg_len = cpu_to_le32(512);
3471         }
3472         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3473
3474         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3475 }
3476
3477 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
3478 {
3479         u32 i, j, max_rings;
3480         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3481         struct hwrm_vnic_rss_cfg_input req = {0};
3482
3483         if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
3484                 return 0;
3485
3486         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
3487         if (set_rss) {
3488                 vnic->hash_type = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
3489                                   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
3490                                   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
3491                                   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
3492
3493                 req.hash_type = cpu_to_le32(vnic->hash_type);
3494
3495                 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
3496                         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3497                                 max_rings = bp->rx_nr_rings - 1;
3498                         else
3499                                 max_rings = bp->rx_nr_rings;
3500                 } else {
3501                         max_rings = 1;
3502                 }
3503
3504                 /* Fill the RSS indirection table with ring group ids */
3505                 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
3506                         if (j == max_rings)
3507                                 j = 0;
3508                         vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
3509                 }
3510
3511                 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
3512                 req.hash_key_tbl_addr =
3513                         cpu_to_le64(vnic->rss_hash_key_dma_addr);
3514         }
3515         req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
3516         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3517 }
3518
3519 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
3520 {
3521         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3522         struct hwrm_vnic_plcmodes_cfg_input req = {0};
3523
3524         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
3525         req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
3526                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
3527                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
3528         req.enables =
3529                 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
3530                             VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
3531         /* thresholds not implemented in firmware yet */
3532         req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
3533         req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
3534         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3535         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3536 }
3537
3538 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
3539                                         u16 ctx_idx)
3540 {
3541         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
3542
3543         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
3544         req.rss_cos_lb_ctx_id =
3545                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
3546
3547         hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3548         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
3549 }
3550
3551 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
3552 {
3553         int i, j;
3554
3555         for (i = 0; i < bp->nr_vnics; i++) {
3556                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3557
3558                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
3559                         if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
3560                                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
3561                 }
3562         }
3563         bp->rsscos_nr_ctxs = 0;
3564 }
3565
3566 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
3567 {
3568         int rc;
3569         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
3570         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
3571                                                 bp->hwrm_cmd_resp_addr;
3572
3573         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
3574                                -1);
3575
3576         mutex_lock(&bp->hwrm_cmd_lock);
3577         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3578         if (!rc)
3579                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
3580                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
3581         mutex_unlock(&bp->hwrm_cmd_lock);
3582
3583         return rc;
3584 }
3585
3586 static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
3587 {
3588         unsigned int ring = 0, grp_idx;
3589         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3590         struct hwrm_vnic_cfg_input req = {0};
3591         u16 def_vlan = 0;
3592
3593         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
3594
3595         req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
3596         /* Only RSS is supported for now; COS & LB support is TBD */
3597         if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
3598                 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
3599                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
3600                                            VNIC_CFG_REQ_ENABLES_MRU);
3601         } else {
3602                 req.rss_rule = cpu_to_le16(0xffff);
3603         }
3604
3605         if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
3606             (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
3607                 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
3608                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
3609         } else {
3610                 req.cos_rule = cpu_to_le16(0xffff);
3611         }
3612
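        /* Pick the default ring group for this VNIC: ring 0 for the RSS
         * VNIC, the VNIC's own RX ring for an RFS VNIC, or the last RX
         * ring for the special Nitro A0 VNIC 1.
         */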
3613         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3614                 ring = 0;
3615         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
3616                 ring = vnic_id - 1;
3617         else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
3618                 ring = bp->rx_nr_rings - 1;
3619
3620         grp_idx = bp->rx_ring[ring].bnapi->index;
3621         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3622         req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
3623
3624         req.lb_rule = cpu_to_le16(0xffff);
3625         req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
3626                               VLAN_HLEN);
3627
3628 #ifdef CONFIG_BNXT_SRIOV
3629         if (BNXT_VF(bp))
3630                 def_vlan = bp->vf.vlan;
3631 #endif
3632         if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
3633                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
3634
3635         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3636 }
3637
3638 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
3639 {
3640         u32 rc = 0;
3641
3642         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
3643                 struct hwrm_vnic_free_input req = {0};
3644
3645                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
3646                 req.vnic_id =
3647                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
3648
3649                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3650                 if (rc)
3651                         return rc;
3652                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
3653         }
3654         return rc;
3655 }
3656
3657 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
3658 {
3659         u16 i;
3660
3661         for (i = 0; i < bp->nr_vnics; i++)
3662                 bnxt_hwrm_vnic_free_one(bp, i);
3663 }
3664
3665 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
3666                                 unsigned int start_rx_ring_idx,
3667                                 unsigned int nr_rings)
3668 {
3669         int rc = 0;
3670         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
3671         struct hwrm_vnic_alloc_input req = {0};
3672         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3673
3674         /* map ring groups to this vnic */
3675         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
3676                 grp_idx = bp->rx_ring[i].bnapi->index;
3677                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
3678                         netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
3679                                    j, nr_rings);
3680                         break;
3681                 }
3682                 bp->vnic_info[vnic_id].fw_grp_ids[j] =
3683                                         bp->grp_info[grp_idx].fw_grp_id;
3684         }
3685
3686         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
3687         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
3688         if (vnic_id == 0)
3689                 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
3690
3691         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
3692
3693         mutex_lock(&bp->hwrm_cmd_lock);
3694         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3695         if (!rc)
3696                 bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
3697         mutex_unlock(&bp->hwrm_cmd_lock);
3698         return rc;
3699 }
3700
3701 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
3702 {
3703         u16 i;
3704         u32 rc = 0;
3705
3706         mutex_lock(&bp->hwrm_cmd_lock);
3707         for (i = 0; i < bp->rx_nr_rings; i++) {
3708                 struct hwrm_ring_grp_alloc_input req = {0};
3709                 struct hwrm_ring_grp_alloc_output *resp =
3710                                         bp->hwrm_cmd_resp_addr;
3711                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
3712
3713                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
3714
3715                 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
3716                 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
3717                 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
3718                 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
3719
3720                 rc = _hwrm_send_message(bp, &req, sizeof(req),
3721                                         HWRM_CMD_TIMEOUT);
3722                 if (rc)
3723                         break;
3724
3725                 bp->grp_info[grp_idx].fw_grp_id =
3726                         le32_to_cpu(resp->ring_group_id);
3727         }
3728         mutex_unlock(&bp->hwrm_cmd_lock);
3729         return rc;
3730 }
3731
3732 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
3733 {
3734         u16 i;
3735         u32 rc = 0;
3736         struct hwrm_ring_grp_free_input req = {0};
3737
3738         if (!bp->grp_info)
3739                 return 0;
3740
3741         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
3742
3743         mutex_lock(&bp->hwrm_cmd_lock);
3744         for (i = 0; i < bp->cp_nr_rings; i++) {
3745                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
3746                         continue;
3747                 req.ring_group_id =
3748                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
3749
3750                 rc = _hwrm_send_message(bp, &req, sizeof(req),
3751                                         HWRM_CMD_TIMEOUT);
3752                 if (rc)
3753                         break;
3754                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3755         }
3756         mutex_unlock(&bp->hwrm_cmd_lock);
3757         return rc;
3758 }
3759
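/* Allocate one HW ring of the given type and record the firmware-assigned
 * ring ID in the ring structure.  Multi-page rings pass a page table;
 * single-page rings pass the page address directly.
 */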
3760 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
3761                                     struct bnxt_ring_struct *ring,
3762                                     u32 ring_type, u32 map_index,
3763                                     u32 stats_ctx_id)
3764 {
3765         int rc = 0, err = 0;
3766         struct hwrm_ring_alloc_input req = {0};
3767         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3768         u16 ring_id;
3769
3770         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
3771
3772         req.enables = 0;
3773         if (ring->nr_pages > 1) {
3774                 req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
3775                 /* Page size is in log2 units */
3776                 req.page_size = BNXT_PAGE_SHIFT;
3777                 req.page_tbl_depth = 1;
3778         } else {
3779                 req.page_tbl_addr =  cpu_to_le64(ring->dma_arr[0]);
3780         }
3781         req.fbo = 0;
3782         /* Association of ring index with doorbell index and MSIX number */
3783         req.logical_id = cpu_to_le16(map_index);
3784
3785         switch (ring_type) {
3786         case HWRM_RING_ALLOC_TX:
3787                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
3788                 /* Association of transmit ring with completion ring */
3789                 req.cmpl_ring_id =
3790                         cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
3791                 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
3792                 req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
3793                 req.queue_id = cpu_to_le16(ring->queue_id);
3794                 break;
3795         case HWRM_RING_ALLOC_RX:
3796                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
3797                 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
3798                 break;
3799         case HWRM_RING_ALLOC_AGG:
3800                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
3801                 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
3802                 break;
3803         case HWRM_RING_ALLOC_CMPL:
3804                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
3805                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
3806                 if (bp->flags & BNXT_FLAG_USING_MSIX)
3807                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
3808                 break;
3809         default:
3810                 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
3811                            ring_type);
3812                 return -1;
3813         }
3814
3815         mutex_lock(&bp->hwrm_cmd_lock);
3816         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3817         err = le16_to_cpu(resp->error_code);
3818         ring_id = le16_to_cpu(resp->ring_id);
3819         mutex_unlock(&bp->hwrm_cmd_lock);
3820
3821         if (rc || err) {
3822                 switch (ring_type) {
3823                 case RING_FREE_REQ_RING_TYPE_CMPL:
3824                         netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
3825                                    rc, err);
3826                         return -1;
3827
3828                 case RING_FREE_REQ_RING_TYPE_RX:
3829                         netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
3830                                    rc, err);
3831                         return -1;
3832
3833                 case RING_FREE_REQ_RING_TYPE_TX:
3834                         netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
3835                                    rc, err);
3836                         return -1;
3837
3838                 default:
3839                         netdev_err(bp->dev, "Invalid ring\n");
3840                         return -1;
3841                 }
3842         }
3843         ring->fw_ring_id = ring_id;
3844         return rc;
3845 }
3846
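/* Tell firmware which completion ring should receive async event
 * notifications, using FUNC_CFG on the PF and FUNC_VF_CFG on a VF.
 */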
3847 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
3848 {
3849         int rc;
3850
3851         if (BNXT_PF(bp)) {
3852                 struct hwrm_func_cfg_input req = {0};
3853
3854                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
3855                 req.fid = cpu_to_le16(0xffff);
3856                 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
3857                 req.async_event_cr = cpu_to_le16(idx);
3858                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3859         } else {
3860                 struct hwrm_func_vf_cfg_input req = {0};
3861
3862                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
3863                 req.enables =
3864                         cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
3865                 req.async_event_cr = cpu_to_le16(idx);
3866                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3867         }
3868         return rc;
3869 }
3870
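/* Allocate all HW rings in dependency order: completion rings first (so
 * that TX rings can reference them and ring 0 can carry async events),
 * then TX rings, RX rings, and finally the RX aggregation rings.
 */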
3871 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
3872 {
3873         int i, rc = 0;
3874
3875         for (i = 0; i < bp->cp_nr_rings; i++) {
3876                 struct bnxt_napi *bnapi = bp->bnapi[i];
3877                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3878                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3879
3880                 cpr->cp_doorbell = bp->bar1 + i * 0x80;
3881                 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
3882                                               INVALID_STATS_CTX_ID);
3883                 if (rc)
3884                         goto err_out;
3885                 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3886                 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
3887
3888                 if (!i) {
3889                         rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
3890                         if (rc)
3891                                 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
3892                 }
3893         }
3894
3895         for (i = 0; i < bp->tx_nr_rings; i++) {
3896                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3897                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3898                 u32 map_idx = txr->bnapi->index;
3899                 u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;
3900
3901                 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
3902                                               map_idx, fw_stats_ctx);
3903                 if (rc)
3904                         goto err_out;
3905                 txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
3906         }
3907
3908         for (i = 0; i < bp->rx_nr_rings; i++) {
3909                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3910                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3911                 u32 map_idx = rxr->bnapi->index;
3912
3913                 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
3914                                               map_idx, INVALID_STATS_CTX_ID);
3915                 if (rc)
3916                         goto err_out;
3917                 rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
3918                 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
3919                 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
3920         }
3921
3922         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
3923                 for (i = 0; i < bp->rx_nr_rings; i++) {
3924                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3925                         struct bnxt_ring_struct *ring =
3926                                                 &rxr->rx_agg_ring_struct;
3927                         u32 grp_idx = rxr->bnapi->index;
3928                         u32 map_idx = grp_idx + bp->rx_nr_rings;
3929
3930                         rc = hwrm_ring_alloc_send_msg(bp, ring,
3931                                                       HWRM_RING_ALLOC_AGG,
3932                                                       map_idx,
3933                                                       INVALID_STATS_CTX_ID);
3934                         if (rc)
3935                                 goto err_out;
3936
3937                         rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
3938                         writel(DB_KEY_RX | rxr->rx_agg_prod,
3939                                rxr->rx_agg_doorbell);
3940                         bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
3941                 }
3942         }
3943 err_out:
3944         return rc;
3945 }
3946
3947 static int hwrm_ring_free_send_msg(struct bnxt *bp,
3948                                    struct bnxt_ring_struct *ring,
3949                                    u32 ring_type, int cmpl_ring_id)
3950 {
3951         int rc;
3952         struct hwrm_ring_free_input req = {0};
3953         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
3954         u16 error_code;
3955
3956         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
3957         req.ring_type = ring_type;
3958         req.ring_id = cpu_to_le16(ring->fw_ring_id);
3959
3960         mutex_lock(&bp->hwrm_cmd_lock);
3961         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3962         error_code = le16_to_cpu(resp->error_code);
3963         mutex_unlock(&bp->hwrm_cmd_lock);
3964
3965         if (rc || error_code) {
3966                 switch (ring_type) {
3967                 case RING_FREE_REQ_RING_TYPE_CMPL:
3968                         netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
3969                                    rc);
3970                         return rc;
3971                 case RING_FREE_REQ_RING_TYPE_RX:
3972                         netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
3973                                    rc);
3974                         return rc;
3975                 case RING_FREE_REQ_RING_TYPE_TX:
3976                         netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
3977                                    rc);
3978                         return rc;
3979                 default:
3980                         netdev_err(bp->dev, "Invalid ring\n");
3981                         return -1;
3982                 }
3983         }
3984         return 0;
3985 }
3986
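/* Free all HW rings.  When close_path is set (normal close), each TX/RX
 * ring free command carries the associated completion ring ID so that it
 * completes through the regular completion path; otherwise
 * INVALID_HW_RING_ID is used.
 */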
3987 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
3988 {
3989         int i;
3990
3991         if (!bp->bnapi)
3992                 return;
3993
3994         for (i = 0; i < bp->tx_nr_rings; i++) {
3995                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3996                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3997                 u32 grp_idx = txr->bnapi->index;
3998                 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
3999
4000                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4001                         hwrm_ring_free_send_msg(bp, ring,
4002                                                 RING_FREE_REQ_RING_TYPE_TX,
4003                                                 close_path ? cmpl_ring_id :
4004                                                 INVALID_HW_RING_ID);
4005                         ring->fw_ring_id = INVALID_HW_RING_ID;
4006                 }
4007         }
4008
4009         for (i = 0; i < bp->rx_nr_rings; i++) {
4010                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4011                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
4012                 u32 grp_idx = rxr->bnapi->index;
4013                 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
4014
4015                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4016                         hwrm_ring_free_send_msg(bp, ring,
4017                                                 RING_FREE_REQ_RING_TYPE_RX,
4018                                                 close_path ? cmpl_ring_id :
4019                                                 INVALID_HW_RING_ID);
4020                         ring->fw_ring_id = INVALID_HW_RING_ID;
4021                         bp->grp_info[grp_idx].rx_fw_ring_id =
4022                                 INVALID_HW_RING_ID;
4023                 }
4024         }
4025
4026         for (i = 0; i < bp->rx_nr_rings; i++) {
4027                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4028                 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
4029                 u32 grp_idx = rxr->bnapi->index;
4030                 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
4031
4032                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4033                         hwrm_ring_free_send_msg(bp, ring,
4034                                                 RING_FREE_REQ_RING_TYPE_RX,
4035                                                 close_path ? cmpl_ring_id :
4036                                                 INVALID_HW_RING_ID);
4037                         ring->fw_ring_id = INVALID_HW_RING_ID;
4038                         bp->grp_info[grp_idx].agg_fw_ring_id =
4039                                 INVALID_HW_RING_ID;
4040                 }
4041         }
4042
4043         for (i = 0; i < bp->cp_nr_rings; i++) {
4044                 struct bnxt_napi *bnapi = bp->bnapi[i];
4045                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4046                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4047
4048                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4049                         hwrm_ring_free_send_msg(bp, ring,
4050                                                 RING_FREE_REQ_RING_TYPE_CMPL,
4051                                                 INVALID_HW_RING_ID);
4052                         ring->fw_ring_id = INVALID_HW_RING_ID;
4053                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4054                 }
4055         }
4056 }
4057
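/* Fill in one coalescing request.  max_bufs and buf_tmrs each pack two
 * values: the normal value in the low 16 bits and the "during interrupt"
 * value in the high 16 bits.
 */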
4058 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
4059         u32 buf_tmrs, u16 flags,
4060         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4061 {
4062         req->flags = cpu_to_le16(flags);
4063         req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
4064         req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
4065         req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
4066         req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
4067         /* Minimum time between 2 interrupts set to buf_tmr x 2 */
4068         req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
4069         req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
4070         req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
4071 }
4072
4073 int bnxt_hwrm_set_coal(struct bnxt *bp)
4074 {
4075         int i, rc = 0;
4076         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
4077                                                            req_tx = {0}, *req;
4078         u16 max_buf, max_buf_irq;
4079         u16 buf_tmr, buf_tmr_irq;
4080         u32 flags;
4081
4082         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
4083                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
4084         bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
4085                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
4086
4087         /* Each rx completion (2 records) should be DMAed immediately.
4088          * DMA 1/4 of the completion buffers at a time.
4089          */
4090         max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
4091         /* max_buf must not be zero */
4092         max_buf = clamp_t(u16, max_buf, 1, 63);
4093         max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
4094         buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
4095         /* buf timer set to 1/4 of interrupt timer */
4096         buf_tmr = max_t(u16, buf_tmr / 4, 1);
4097         buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
4098         buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
4099
4100         flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
4101
4102         /* RING_IDLE generates more IRQs for lower latency.  Enable it only
4103          * if coal_ticks is less than 25 us.
4104          */
4105         if (bp->rx_coal_ticks < 25)
4106                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
4107
4108         bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
4109                                   buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
4110
4111         /* max_buf must not be zero */
4112         max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
4113         max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
4114         buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
4115         /* buf timer set to 1/4 of interrupt timer */
4116         buf_tmr = max_t(u16, buf_tmr / 4, 1);
4117         buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
4118         buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
4119
4120         flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
4121         bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
4122                                   buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
4123
4124         mutex_lock(&bp->hwrm_cmd_lock);
4125         for (i = 0; i < bp->cp_nr_rings; i++) {
4126                 struct bnxt_napi *bnapi = bp->bnapi[i];
4127
4128                 req = &req_rx;
4129                 if (!bnapi->rx_ring)
4130                         req = &req_tx;
4131                 req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
4132
4133                 rc = _hwrm_send_message(bp, req, sizeof(*req),
4134                                         HWRM_CMD_TIMEOUT);
4135                 if (rc)
4136                         break;
4137         }
4138         mutex_unlock(&bp->hwrm_cmd_lock);
4139         return rc;
4140 }
4141
4142 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
4143 {
4144         int rc = 0, i;
4145         struct hwrm_stat_ctx_free_input req = {0};
4146
4147         if (!bp->bnapi)
4148                 return 0;
4149
4150         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4151                 return 0;
4152
4153         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
4154
4155         mutex_lock(&bp->hwrm_cmd_lock);
4156         for (i = 0; i < bp->cp_nr_rings; i++) {
4157                 struct bnxt_napi *bnapi = bp->bnapi[i];
4158                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4159
4160                 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
4161                         req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
4162
4163                         rc = _hwrm_send_message(bp, &req, sizeof(req),
4164                                                 HWRM_CMD_TIMEOUT);
4165                         if (rc)
4166                                 break;
4167
4168                         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4169                 }
4170         }
4171         mutex_unlock(&bp->hwrm_cmd_lock);
4172         return rc;
4173 }
4174
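/* Allocate a firmware statistics context for every completion ring and record
 * the returned id in both the ring and its ring group.  The stats update
 * period is derived from bp->stats_coal_ticks.  Skipped on Nitro A0.
 */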
4175 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
4176 {
4177         int rc = 0, i;
4178         struct hwrm_stat_ctx_alloc_input req = {0};
4179         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4180
4181         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4182                 return 0;
4183
4184         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
4185
4186         req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
4187
4188         mutex_lock(&bp->hwrm_cmd_lock);
4189         for (i = 0; i < bp->cp_nr_rings; i++) {
4190                 struct bnxt_napi *bnapi = bp->bnapi[i];
4191                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4192
4193                 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
4194
4195                 rc = _hwrm_send_message(bp, &req, sizeof(req),
4196                                         HWRM_CMD_TIMEOUT);
4197                 if (rc)
4198                         break;
4199
4200                 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
4201
4202                 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
4203         }
4204         mutex_unlock(&bp->hwrm_cmd_lock);
4205         return rc;
4206 }
4207
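/* Query the current function configuration.  On a VF this caches the
 * firmware-assigned default VLAN; on all functions it records the NPAR port
 * partition type if one is configured.
 */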
4208 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
4209 {
4210         struct hwrm_func_qcfg_input req = {0};
4211         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4212         int rc;
4213
4214         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
4215         req.fid = cpu_to_le16(0xffff);
4216         mutex_lock(&bp->hwrm_cmd_lock);
4217         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4218         if (rc)
4219                 goto func_qcfg_exit;
4220
4221 #ifdef CONFIG_BNXT_SRIOV
4222         if (BNXT_VF(bp)) {
4223                 struct bnxt_vf_info *vf = &bp->vf;
4224
4225                 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
4226         }
4227 #endif
4228         switch (resp->port_partition_type) {
4229         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
4230         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
4231         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
4232                 bp->port_partition_type = resp->port_partition_type;
4233                 break;
4234         }
4235
4236 func_qcfg_exit:
4237         mutex_unlock(&bp->hwrm_cmd_lock);
4238         return rc;
4239 }
4240
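/* Query function capabilities and cache the resource limits (rings, ring
 * groups, VNICs, L2/RSS contexts, stats contexts, VF counts) in the PF or VF
 * info structure.  For a VF this also sets the netdev MAC address: the
 * firmware-provided address is used if valid, otherwise a random address is
 * generated and submitted for approval.
 */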
4241 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
4242 {
4243         int rc = 0;
4244         struct hwrm_func_qcaps_input req = {0};
4245         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4246
4247         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
4248         req.fid = cpu_to_le16(0xffff);
4249
4250         mutex_lock(&bp->hwrm_cmd_lock);
4251         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4252         if (rc)
4253                 goto hwrm_func_qcaps_exit;
4254
4255         bp->tx_push_thresh = 0;
4256         if (resp->flags &
4257             cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
4258                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
4259
4260         if (BNXT_PF(bp)) {
4261                 struct bnxt_pf_info *pf = &bp->pf;
4262
4263                 pf->fw_fid = le16_to_cpu(resp->fid);
4264                 pf->port_id = le16_to_cpu(resp->port_id);
4265                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
4266                 memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
4267                 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
4268                 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
4269                 pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
4270                 pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
4271                 pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
4272                 if (!pf->max_hw_ring_grps)
4273                         pf->max_hw_ring_grps = pf->max_tx_rings;
4274                 pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
4275                 pf->max_vnics = le16_to_cpu(resp->max_vnics);
4276                 pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
4277                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
4278                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
4279                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
4280                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
4281                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
4282                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
4283                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
4284                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
4285         } else {
4286 #ifdef CONFIG_BNXT_SRIOV
4287                 struct bnxt_vf_info *vf = &bp->vf;
4288
4289                 vf->fw_fid = le16_to_cpu(resp->fid);
4290
4291                 vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
4292                 vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
4293                 vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
4294                 vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
4295                 vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
4296                 if (!vf->max_hw_ring_grps)
4297                         vf->max_hw_ring_grps = vf->max_tx_rings;
4298                 vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
4299                 vf->max_vnics = le16_to_cpu(resp->max_vnics);
4300                 vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
4301
4302                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
4303                 mutex_unlock(&bp->hwrm_cmd_lock);
4304
4305                 if (is_valid_ether_addr(vf->mac_addr)) {
4306                         /* overwrite netdev dev_addr with admin VF MAC */
4307                         memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
4308                 } else {
4309                         random_ether_addr(bp->dev->dev_addr);
4310                         rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
4311                 }
4312                 return rc;
4313 #endif
4314         }
4315
4316 hwrm_func_qcaps_exit:
4317         mutex_unlock(&bp->hwrm_cmd_lock);
4318         return rc;
4319 }
4320
4321 static int bnxt_hwrm_func_reset(struct bnxt *bp)
4322 {
4323         struct hwrm_func_reset_input req = {0};
4324
4325         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
4326         req.enables = 0;
4327
4328         return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
4329 }
4330
4331 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
4332 {
4333         int rc = 0;
4334         struct hwrm_queue_qportcfg_input req = {0};
4335         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
4336         u8 i, *qptr;
4337
4338         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
4339
4340         mutex_lock(&bp->hwrm_cmd_lock);
4341         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4342         if (rc)
4343                 goto qportcfg_exit;
4344
4345         if (!resp->max_configurable_queues) {
4346                 rc = -EINVAL;
4347                 goto qportcfg_exit;
4348         }
4349         bp->max_tc = resp->max_configurable_queues;
4350         if (bp->max_tc > BNXT_MAX_QUEUE)
4351                 bp->max_tc = BNXT_MAX_QUEUE;
4352
4353         if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
4354                 bp->max_tc = 1;
4355
4356         qptr = &resp->queue_id0;
4357         for (i = 0; i < bp->max_tc; i++) {
4358                 bp->q_info[i].queue_id = *qptr++;
4359                 bp->q_info[i].queue_profile = *qptr++;
4360         }
4361
4362 qportcfg_exit:
4363         mutex_unlock(&bp->hwrm_cmd_lock);
4364         return rc;
4365 }
4366
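/* Retrieve the firmware and HWRM interface versions.  The response is cached
 * and used to derive bp->hwrm_spec_code, the firmware version string, the
 * default HWRM command timeout and maximum request length, and to detect the
 * Nitro A0 chip.
 */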
4367 static int bnxt_hwrm_ver_get(struct bnxt *bp)
4368 {
4369         int rc;
4370         struct hwrm_ver_get_input req = {0};
4371         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
4372
4373         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
4374         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
4375         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
4376         req.hwrm_intf_min = HWRM_VERSION_MINOR;
4377         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
4378         mutex_lock(&bp->hwrm_cmd_lock);
4379         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4380         if (rc)
4381                 goto hwrm_ver_get_exit;
4382
4383         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
4384
4385         bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
4386                              resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
4387         if (resp->hwrm_intf_maj < 1) {
4388                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
4389                             resp->hwrm_intf_maj, resp->hwrm_intf_min,
4390                             resp->hwrm_intf_upd);
4391                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
4392         }
4393         snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
4394                  resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
4395                  resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
4396
4397         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
4398         if (!bp->hwrm_cmd_timeout)
4399                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
4400
4401         if (resp->hwrm_intf_maj >= 1)
4402                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
4403
4404         bp->chip_num = le16_to_cpu(resp->chip_num);
4405         if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
4406             !resp->chip_metal)
4407                 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
4408
4409 hwrm_ver_get_exit:
4410         mutex_unlock(&bp->hwrm_cmd_lock);
4411         return rc;
4412 }
4413
4414 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
4415 {
4416 #if IS_ENABLED(CONFIG_RTC_LIB)
4417         struct hwrm_fw_set_time_input req = {0};
4418         struct rtc_time tm;
4419         struct timeval tv;
4420
4421         if (bp->hwrm_spec_code < 0x10400)
4422                 return -EOPNOTSUPP;
4423
4424         do_gettimeofday(&tv);
4425         rtc_time_to_tm(tv.tv_sec, &tm);
4426         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
4427         req.year = cpu_to_le16(1900 + tm.tm_year);
4428         req.month = 1 + tm.tm_mon;
4429         req.day = tm.tm_mday;
4430         req.hour = tm.tm_hour;
4431         req.minute = tm.tm_min;
4432         req.second = tm.tm_sec;
4433         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4434 #else
4435         return -EOPNOTSUPP;
4436 #endif
4437 }
4438
4439 static int bnxt_hwrm_port_qstats(struct bnxt *bp)
4440 {
4441         int rc;
4442         struct bnxt_pf_info *pf = &bp->pf;
4443         struct hwrm_port_qstats_input req = {0};
4444
4445         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
4446                 return 0;
4447
4448         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
4449         req.port_id = cpu_to_le16(pf->port_id);
4450         req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
4451         req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
4452         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4453         return rc;
4454 }
4455
4456 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
4457 {
4458         if (bp->vxlan_port_cnt) {
4459                 bnxt_hwrm_tunnel_dst_port_free(
4460                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
4461         }
4462         bp->vxlan_port_cnt = 0;
4463         if (bp->nge_port_cnt) {
4464                 bnxt_hwrm_tunnel_dst_port_free(
4465                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
4466         }
4467         bp->nge_port_cnt = 0;
4468 }
4469
4470 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
4471 {
4472         int rc, i;
4473         u32 tpa_flags = 0;
4474
4475         if (set_tpa)
4476                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
4477         for (i = 0; i < bp->nr_vnics; i++) {
4478                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
4479                 if (rc) {
4480                         netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
4481                                    i, rc);
4482                         return rc;
4483                 }
4484         }
4485         return 0;
4486 }
4487
4488 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
4489 {
4490         int i;
4491
4492         for (i = 0; i < bp->nr_vnics; i++)
4493                 bnxt_hwrm_vnic_set_rss(bp, i, false);
4494 }
4495
4496 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
4497                                     bool irq_re_init)
4498 {
4499         if (bp->vnic_info) {
4500                 bnxt_hwrm_clear_vnic_filter(bp);
4501                 /* clear all RSS settings before freeing the vnic ctx */
4502                 bnxt_hwrm_clear_vnic_rss(bp);
4503                 bnxt_hwrm_vnic_ctx_free(bp);
4504                 /* before freeing the vnic, undo the vnic TPA settings */
4505                 if (bp->flags & BNXT_FLAG_TPA)
4506                         bnxt_set_tpa(bp, false);
4507                 bnxt_hwrm_vnic_free(bp);
4508         }
4509         bnxt_hwrm_ring_free(bp, close_path);
4510         bnxt_hwrm_ring_grp_free(bp);
4511         if (irq_re_init) {
4512                 bnxt_hwrm_stat_ctx_free(bp);
4513                 bnxt_hwrm_free_tunnel_ports(bp);
4514         }
4515 }
4516
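/* Set up one VNIC: allocate its RSS context (plus an extra COS context on
 * Nitro A0), configure the VNIC and its default ring group, enable RSS
 * hashing, and enable header-data split when aggregation rings are in use.
 */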
4517 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
4518 {
4519         int rc;
4520
4521         /* allocate context for vnic */
4522         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
4523         if (rc) {
4524                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
4525                            vnic_id, rc);
4526                 goto vnic_setup_err;
4527         }
4528         bp->rsscos_nr_ctxs++;
4529
4530         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
4531                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
4532                 if (rc) {
4533                         netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
4534                                    vnic_id, rc);
4535                         goto vnic_setup_err;
4536                 }
4537                 bp->rsscos_nr_ctxs++;
4538         }
4539
4540         /* configure default vnic, ring grp */
4541         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
4542         if (rc) {
4543                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
4544                            vnic_id, rc);
4545                 goto vnic_setup_err;
4546         }
4547
4548         /* Enable RSS hashing on vnic */
4549         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
4550         if (rc) {
4551                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
4552                            vnic_id, rc);
4553                 goto vnic_setup_err;
4554         }
4555
4556         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
4557                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
4558                 if (rc) {
4559                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
4560                                    vnic_id, rc);
4561                 }
4562         }
4563
4564 vnic_setup_err:
4565         return rc;
4566 }
4567
4568 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
4569 {
4570 #ifdef CONFIG_RFS_ACCEL
4571         int i, rc = 0;
4572
4573         for (i = 0; i < bp->rx_nr_rings; i++) {
4574                 u16 vnic_id = i + 1;
4575                 u16 ring_id = i;
4576
4577                 if (vnic_id >= bp->nr_vnics)
4578                         break;
4579
4580                 bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
4581                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
4582                 if (rc) {
4583                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
4584                                    vnic_id, rc);
4585                         break;
4586                 }
4587                 rc = bnxt_setup_vnic(bp, vnic_id);
4588                 if (rc)
4589                         break;
4590         }
4591         return rc;
4592 #else
4593         return 0;
4594 #endif
4595 }
4596
4597 /* Allow PF and VF with default VLAN to be in promiscuous mode */
4598 static bool bnxt_promisc_ok(struct bnxt *bp)
4599 {
4600 #ifdef CONFIG_BNXT_SRIOV
4601         if (BNXT_VF(bp) && !bp->vf.vlan)
4602                 return false;
4603 #endif
4604         return true;
4605 }
4606
4607 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
4608 {
4609         int rc = 0;
4610
4611         rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
4612         if (rc) {
4613                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
4614                            rc);
4615                 return rc;
4616         }
4617
4618         rc = bnxt_hwrm_vnic_cfg(bp, 1);
4619         if (rc) {
4620                 netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
4621                            rc);
4622                 return rc;
4623         }
4624         return rc;
4625 }
4626
4627 static int bnxt_cfg_rx_mode(struct bnxt *);
4628 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
4629
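/* Bring up the chip-level resources: statistics contexts (when interrupts are
 * being re-initialized), rings and ring groups, the default VNIC 0 plus any
 * RFS VNICs, TPA, the default L2 filter and RX mask, and interrupt
 * coalescing.  On failure, the firmware resources allocated so far are freed.
 */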
4630 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
4631 {
4632         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
4633         int rc = 0;
4634         unsigned int rx_nr_rings = bp->rx_nr_rings;
4635
4636         if (irq_re_init) {
4637                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
4638                 if (rc) {
4639                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
4640                                    rc);
4641                         goto err_out;
4642                 }
4643         }
4644
4645         rc = bnxt_hwrm_ring_alloc(bp);
4646         if (rc) {
4647                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
4648                 goto err_out;
4649         }
4650
4651         rc = bnxt_hwrm_ring_grp_alloc(bp);
4652         if (rc) {
4653                 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
4654                 goto err_out;
4655         }
4656
4657         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4658                 rx_nr_rings--;
4659
4660         /* default vnic 0 */
4661         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
4662         if (rc) {
4663                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
4664                 goto err_out;
4665         }
4666
4667         rc = bnxt_setup_vnic(bp, 0);
4668         if (rc)
4669                 goto err_out;
4670
4671         if (bp->flags & BNXT_FLAG_RFS) {
4672                 rc = bnxt_alloc_rfs_vnics(bp);
4673                 if (rc)
4674                         goto err_out;
4675         }
4676
4677         if (bp->flags & BNXT_FLAG_TPA) {
4678                 rc = bnxt_set_tpa(bp, true);
4679                 if (rc)
4680                         goto err_out;
4681         }
4682
4683         if (BNXT_VF(bp))
4684                 bnxt_update_vf_mac(bp);
4685
4686         /* Filter for default vnic 0 */
4687         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
4688         if (rc) {
4689                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
4690                 goto err_out;
4691         }
4692         vnic->uc_filter_count = 1;
4693
4694         vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
4695
4696         if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
4697                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
4698
4699         if (bp->dev->flags & IFF_ALLMULTI) {
4700                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
4701                 vnic->mc_list_count = 0;
4702         } else {
4703                 u32 mask = 0;
4704
4705                 bnxt_mc_list_updated(bp, &mask);
4706                 vnic->rx_mask |= mask;
4707         }
4708
4709         rc = bnxt_cfg_rx_mode(bp);
4710         if (rc)
4711                 goto err_out;
4712
4713         rc = bnxt_hwrm_set_coal(bp);
4714         if (rc)
4715                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
4716                                 rc);
4717
4718         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
4719                 rc = bnxt_setup_nitroa0_vnic(bp);
4720                 if (rc)
4721                         netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
4722                                    rc);
4723         }
4724
4725         if (BNXT_VF(bp)) {
4726                 bnxt_hwrm_func_qcfg(bp);
4727                 netdev_update_features(bp->dev);
4728         }
4729
4730         return 0;
4731
4732 err_out:
4733         bnxt_hwrm_resource_free(bp, 0, true);
4734
4735         return rc;
4736 }
4737
4738 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
4739 {
4740         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
4741         return 0;
4742 }
4743
4744 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
4745 {
4746         bnxt_init_cp_rings(bp);
4747         bnxt_init_rx_rings(bp);
4748         bnxt_init_tx_rings(bp);
4749         bnxt_init_ring_grps(bp, irq_re_init);
4750         bnxt_init_vnics(bp);
4751
4752         return bnxt_init_chip(bp, irq_re_init);
4753 }
4754
4755 static void bnxt_disable_int(struct bnxt *bp)
4756 {
4757         int i;
4758
4759         if (!bp->bnapi)
4760                 return;
4761
4762         for (i = 0; i < bp->cp_nr_rings; i++) {
4763                 struct bnxt_napi *bnapi = bp->bnapi[i];
4764                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4765
4766                 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
4767         }
4768 }
4769
4770 static void bnxt_enable_int(struct bnxt *bp)
4771 {
4772         int i;
4773
4774         atomic_set(&bp->intr_sem, 0);
4775         for (i = 0; i < bp->cp_nr_rings; i++) {
4776                 struct bnxt_napi *bnapi = bp->bnapi[i];
4777                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4778
4779                 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
4780         }
4781 }
4782
4783 static int bnxt_set_real_num_queues(struct bnxt *bp)
4784 {
4785         int rc;
4786         struct net_device *dev = bp->dev;
4787
4788         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
4789         if (rc)
4790                 return rc;
4791
4792         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
4793         if (rc)
4794                 return rc;
4795
4796 #ifdef CONFIG_RFS_ACCEL
4797         if (bp->flags & BNXT_FLAG_RFS)
4798                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
4799 #endif
4800
4801         return rc;
4802 }
4803
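/* Trim the requested RX and TX ring counts so they fit within "max" vectors.
 * With shared completion rings each count is simply capped at max; otherwise
 * rings are removed one at a time from the larger side until rx + tx <= max.
 * For example, rx = 4, tx = 4, max = 6 is trimmed to rx = 3, tx = 3.
 */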
4804 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
4805                            bool shared)
4806 {
4807         int _rx = *rx, _tx = *tx;
4808
4809         if (shared) {
4810                 *rx = min_t(int, _rx, max);
4811                 *tx = min_t(int, _tx, max);
4812         } else {
4813                 if (max < 2)
4814                         return -ENOMEM;
4815
4816                 while (_rx + _tx > max) {
4817                         if (_rx > _tx && _rx > 1)
4818                                 _rx--;
4819                         else if (_tx > 1)
4820                                 _tx--;
4821                 }
4822                 *rx = _rx;
4823                 *tx = _tx;
4824         }
4825         return 0;
4826 }
4827
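/* Enable MSI-X and build the IRQ table.  One vector is requested per
 * completion ring; if fewer vectors are granted, the RX/TX ring counts are
 * trimmed to match, the TX rings are redistributed across traffic classes,
 * and the real number of netdev queues is updated.
 */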
4828 static int bnxt_setup_msix(struct bnxt *bp)
4829 {
4830         struct msix_entry *msix_ent;
4831         struct net_device *dev = bp->dev;
4832         int i, total_vecs, rc = 0, min = 1;
4833         const int len = sizeof(bp->irq_tbl[0].name);
4834
4835         bp->flags &= ~BNXT_FLAG_USING_MSIX;
4836         total_vecs = bp->cp_nr_rings;
4837
4838         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
4839         if (!msix_ent)
4840                 return -ENOMEM;
4841
4842         for (i = 0; i < total_vecs; i++) {
4843                 msix_ent[i].entry = i;
4844                 msix_ent[i].vector = 0;
4845         }
4846
4847         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
4848                 min = 2;
4849
4850         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
4851         if (total_vecs < 0) {
4852                 rc = -ENODEV;
4853                 goto msix_setup_exit;
4854         }
4855
4856         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
4857         if (bp->irq_tbl) {
4858                 int tcs;
4859
4860                 /* Trim rings based on the number of vectors allocated */
4861                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
4862                                      total_vecs, min == 1);
4863                 if (rc)
4864                         goto msix_setup_exit;
4865
4866                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4867                 tcs = netdev_get_num_tc(dev);
4868                 if (tcs > 1) {
4869                         bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
4870                         if (bp->tx_nr_rings_per_tc == 0) {
4871                                 netdev_reset_tc(dev);
4872                                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4873                         } else {
4874                                 int i, off, count;
4875
4876                                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
4877                                 for (i = 0; i < tcs; i++) {
4878                                         count = bp->tx_nr_rings_per_tc;
4879                                         off = i * count;
4880                                         netdev_set_tc_queue(dev, i, count, off);
4881                                 }
4882                         }
4883                 }
4884                 bp->cp_nr_rings = total_vecs;
4885
4886                 for (i = 0; i < bp->cp_nr_rings; i++) {
4887                         char *attr;
4888
4889                         bp->irq_tbl[i].vector = msix_ent[i].vector;
4890                         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4891                                 attr = "TxRx";
4892                         else if (i < bp->rx_nr_rings)
4893                                 attr = "rx";
4894                         else
4895                                 attr = "tx";
4896
4897                         snprintf(bp->irq_tbl[i].name, len,
4898                                  "%s-%s-%d", dev->name, attr, i);
4899                         bp->irq_tbl[i].handler = bnxt_msix;
4900                 }
4901                 rc = bnxt_set_real_num_queues(bp);
4902                 if (rc)
4903                         goto msix_setup_exit;
4904         } else {
4905                 rc = -ENOMEM;
4906                 goto msix_setup_exit;
4907         }
4908         bp->flags |= BNXT_FLAG_USING_MSIX;
4909         kfree(msix_ent);
4910         return 0;
4911
4912 msix_setup_exit:
4913         netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
4914         pci_disable_msix(bp->pdev);
4915         kfree(msix_ent);
4916         return rc;
4917 }
4918
4919 static int bnxt_setup_inta(struct bnxt *bp)
4920 {
4921         int rc;
4922         const int len = sizeof(bp->irq_tbl[0].name);
4923
4924         if (netdev_get_num_tc(bp->dev))
4925                 netdev_reset_tc(bp->dev);
4926
4927         bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
4928         if (!bp->irq_tbl) {
4929                 rc = -ENOMEM;
4930                 return rc;
4931         }
4932         bp->rx_nr_rings = 1;
4933         bp->tx_nr_rings = 1;
4934         bp->cp_nr_rings = 1;
4935         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4936         bp->flags |= BNXT_FLAG_SHARED_RINGS;
4937         bp->irq_tbl[0].vector = bp->pdev->irq;
4938         snprintf(bp->irq_tbl[0].name, len,
4939                  "%s-%s-%d", bp->dev->name, "TxRx", 0);
4940         bp->irq_tbl[0].handler = bnxt_inta;
4941         rc = bnxt_set_real_num_queues(bp);
4942         return rc;
4943 }
4944
4945 static int bnxt_setup_int_mode(struct bnxt *bp)
4946 {
4947         int rc = 0;
4948
4949         if (bp->flags & BNXT_FLAG_MSIX_CAP)
4950                 rc = bnxt_setup_msix(bp);
4951
4952         if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
4953                 /* fallback to INTA */
4954                 rc = bnxt_setup_inta(bp);
4955         }
4956         return rc;
4957 }
4958
4959 static void bnxt_free_irq(struct bnxt *bp)
4960 {
4961         struct bnxt_irq *irq;
4962         int i;
4963
4964 #ifdef CONFIG_RFS_ACCEL
4965         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
4966         bp->dev->rx_cpu_rmap = NULL;
4967 #endif
4968         if (!bp->irq_tbl)
4969                 return;
4970
4971         for (i = 0; i < bp->cp_nr_rings; i++) {
4972                 irq = &bp->irq_tbl[i];
4973                 if (irq->requested)
4974                         free_irq(irq->vector, bp->bnapi[i]);
4975                 irq->requested = 0;
4976         }
4977         if (bp->flags & BNXT_FLAG_USING_MSIX)
4978                 pci_disable_msix(bp->pdev);
4979         kfree(bp->irq_tbl);
4980         bp->irq_tbl = NULL;
4981 }
4982
4983 static int bnxt_request_irq(struct bnxt *bp)
4984 {
4985         int i, j, rc = 0;
4986         unsigned long flags = 0;
4987 #ifdef CONFIG_RFS_ACCEL
4988         struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
4989 #endif
4990
4991         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
4992                 flags = IRQF_SHARED;
4993
4994         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
4995                 struct bnxt_irq *irq = &bp->irq_tbl[i];
4996 #ifdef CONFIG_RFS_ACCEL
4997                 if (rmap && bp->bnapi[i]->rx_ring) {
4998                         rc = irq_cpu_rmap_add(rmap, irq->vector);
4999                         if (rc)
5000                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
5001                                             j);
5002                         j++;
5003                 }
5004 #endif
5005                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5006                                  bp->bnapi[i]);
5007                 if (rc)
5008                         break;
5009
5010                 irq->requested = 1;
5011         }
5012         return rc;
5013 }
5014
5015 static void bnxt_del_napi(struct bnxt *bp)
5016 {
5017         int i;
5018
5019         if (!bp->bnapi)
5020                 return;
5021
5022         for (i = 0; i < bp->cp_nr_rings; i++) {
5023                 struct bnxt_napi *bnapi = bp->bnapi[i];
5024
5025                 napi_hash_del(&bnapi->napi);
5026                 netif_napi_del(&bnapi->napi);
5027         }
5028         /* We called napi_hash_del() before netif_napi_del(), so we need
5029          * to respect an RCU grace period before freeing napi structures.
5030          */
5031         synchronize_net();
5032 }
5033
5034 static void bnxt_init_napi(struct bnxt *bp)
5035 {
5036         int i;
5037         unsigned int cp_nr_rings = bp->cp_nr_rings;
5038         struct bnxt_napi *bnapi;
5039
5040         if (bp->flags & BNXT_FLAG_USING_MSIX) {
5041                 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5042                         cp_nr_rings--;
5043                 for (i = 0; i < cp_nr_rings; i++) {
5044                         bnapi = bp->bnapi[i];
5045                         netif_napi_add(bp->dev, &bnapi->napi,
5046                                        bnxt_poll, 64);
5047                 }
5048                 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
5049                         bnapi = bp->bnapi[cp_nr_rings];
5050                         netif_napi_add(bp->dev, &bnapi->napi,
5051                                        bnxt_poll_nitroa0, 64);
5052                         napi_hash_add(&bnapi->napi);
5053                 }
5054         } else {
5055                 bnapi = bp->bnapi[0];
5056                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
5057         }
5058 }
5059
5060 static void bnxt_disable_napi(struct bnxt *bp)
5061 {
5062         int i;
5063
5064         if (!bp->bnapi)
5065                 return;
5066
5067         for (i = 0; i < bp->cp_nr_rings; i++) {
5068                 napi_disable(&bp->bnapi[i]->napi);
5069                 bnxt_disable_poll(bp->bnapi[i]);
5070         }
5071 }
5072
5073 static void bnxt_enable_napi(struct bnxt *bp)
5074 {
5075         int i;
5076
5077         for (i = 0; i < bp->cp_nr_rings; i++) {
5078                 bp->bnapi[i]->in_reset = false;
5079                 bnxt_enable_poll(bp->bnapi[i]);
5080                 napi_enable(&bp->bnapi[i]->napi);
5081         }
5082 }
5083
5084 static void bnxt_tx_disable(struct bnxt *bp)
5085 {
5086         int i;
5087         struct bnxt_tx_ring_info *txr;
5088         struct netdev_queue *txq;
5089
5090         if (bp->tx_ring) {
5091                 for (i = 0; i < bp->tx_nr_rings; i++) {
5092                         txr = &bp->tx_ring[i];
5093                         txq = netdev_get_tx_queue(bp->dev, i);
5094                         txr->dev_state = BNXT_DEV_STATE_CLOSING;
5095                 }
5096         }
5097         /* Drop carrier first to prevent TX timeout */
5098         netif_carrier_off(bp->dev);
5099         /* Stop all TX queues */
5100         netif_tx_disable(bp->dev);
5101 }
5102
5103 static void bnxt_tx_enable(struct bnxt *bp)
5104 {
5105         int i;
5106         struct bnxt_tx_ring_info *txr;
5107         struct netdev_queue *txq;
5108
5109         for (i = 0; i < bp->tx_nr_rings; i++) {
5110                 txr = &bp->tx_ring[i];
5111                 txq = netdev_get_tx_queue(bp->dev, i);
5112                 txr->dev_state = 0;
5113         }
5114         netif_tx_wake_all_queues(bp->dev);
5115         if (bp->link_info.link_up)
5116                 netif_carrier_on(bp->dev);
5117 }
5118
5119 static void bnxt_report_link(struct bnxt *bp)
5120 {
5121         if (bp->link_info.link_up) {
5122                 const char *duplex;
5123                 const char *flow_ctrl;
5124                 u16 speed;
5125
5126                 netif_carrier_on(bp->dev);
5127                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
5128                         duplex = "full";
5129                 else
5130                         duplex = "half";
5131                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
5132                         flow_ctrl = "ON - receive & transmit";
5133                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
5134                         flow_ctrl = "ON - transmit";
5135                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
5136                         flow_ctrl = "ON - receive";
5137                 else
5138                         flow_ctrl = "none";
5139                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
5140                 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
5141                             speed, duplex, flow_ctrl);
5142                 if (bp->flags & BNXT_FLAG_EEE_CAP)
5143                         netdev_info(bp->dev, "EEE is %s\n",
5144                                     bp->eee.eee_active ? "active" :
5145                                                          "not active");
5146         } else {
5147                 netif_carrier_off(bp->dev);
5148                 netdev_err(bp->dev, "NIC Link is Down\n");
5149         }
5150 }
5151
5152 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
5153 {
5154         int rc = 0;
5155         struct hwrm_port_phy_qcaps_input req = {0};
5156         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5157         struct bnxt_link_info *link_info = &bp->link_info;
5158
5159         if (bp->hwrm_spec_code < 0x10201)
5160                 return 0;
5161
5162         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
5163
5164         mutex_lock(&bp->hwrm_cmd_lock);
5165         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5166         if (rc)
5167                 goto hwrm_phy_qcaps_exit;
5168
5169         if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) {
5170                 struct ethtool_eee *eee = &bp->eee;
5171                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
5172
5173                 bp->flags |= BNXT_FLAG_EEE_CAP;
5174                 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5175                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
5176                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
5177                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
5178                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
5179         }
5180         if (resp->supported_speeds_auto_mode)
5181                 link_info->support_auto_speeds =
5182                         le16_to_cpu(resp->supported_speeds_auto_mode);
5183
5184 hwrm_phy_qcaps_exit:
5185         mutex_unlock(&bp->hwrm_cmd_lock);
5186         return rc;
5187 }
5188
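/* Query the PHY and refresh the cached link state (speed, duplex, pause,
 * supported/advertised speeds, EEE and module status).  When chng_link_state
 * is set, link up/down transitions are reported; otherwise the link is left
 * marked down.  On a single-function PF, if an advertised speed is no longer
 * supported, the advertisement is trimmed and re-applied.
 */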
5189 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
5190 {
5191         int rc = 0;
5192         struct bnxt_link_info *link_info = &bp->link_info;
5193         struct hwrm_port_phy_qcfg_input req = {0};
5194         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5195         u8 link_up = link_info->link_up;
5196         u16 diff;
5197
5198         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
5199
5200         mutex_lock(&bp->hwrm_cmd_lock);
5201         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5202         if (rc) {
5203                 mutex_unlock(&bp->hwrm_cmd_lock);
5204                 return rc;
5205         }
5206
5207         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
5208         link_info->phy_link_status = resp->link;
5209         link_info->duplex =  resp->duplex;
5210         link_info->pause = resp->pause;
5211         link_info->auto_mode = resp->auto_mode;
5212         link_info->auto_pause_setting = resp->auto_pause;
5213         link_info->lp_pause = resp->link_partner_adv_pause;
5214         link_info->force_pause_setting = resp->force_pause;
5215         link_info->duplex_setting = resp->duplex;
5216         if (link_info->phy_link_status == BNXT_LINK_LINK)
5217                 link_info->link_speed = le16_to_cpu(resp->link_speed);
5218         else
5219                 link_info->link_speed = 0;
5220         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
5221         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
5222         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
5223         link_info->lp_auto_link_speeds =
5224                 le16_to_cpu(resp->link_partner_adv_speeds);
5225         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
5226         link_info->phy_ver[0] = resp->phy_maj;
5227         link_info->phy_ver[1] = resp->phy_min;
5228         link_info->phy_ver[2] = resp->phy_bld;
5229         link_info->media_type = resp->media_type;
5230         link_info->phy_type = resp->phy_type;
5231         link_info->transceiver = resp->xcvr_pkg_type;
5232         link_info->phy_addr = resp->eee_config_phy_addr &
5233                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
5234         link_info->module_status = resp->module_status;
5235
5236         if (bp->flags & BNXT_FLAG_EEE_CAP) {
5237                 struct ethtool_eee *eee = &bp->eee;
5238                 u16 fw_speeds;
5239
5240                 eee->eee_active = 0;
5241                 if (resp->eee_config_phy_addr &
5242                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
5243                         eee->eee_active = 1;
5244                         fw_speeds = le16_to_cpu(
5245                                 resp->link_partner_adv_eee_link_speed_mask);
5246                         eee->lp_advertised =
5247                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5248                 }
5249
5250                 /* Pull initial EEE config */
5251                 if (!chng_link_state) {
5252                         if (resp->eee_config_phy_addr &
5253                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
5254                                 eee->eee_enabled = 1;
5255
5256                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
5257                         eee->advertised =
5258                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5259
5260                         if (resp->eee_config_phy_addr &
5261                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
5262                                 __le32 tmr;
5263
5264                                 eee->tx_lpi_enabled = 1;
5265                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
5266                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
5267                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
5268                         }
5269                 }
5270         }
5271         /* TODO: need to add more logic to report VF link */
5272         if (chng_link_state) {
5273                 if (link_info->phy_link_status == BNXT_LINK_LINK)
5274                         link_info->link_up = 1;
5275                 else
5276                         link_info->link_up = 0;
5277                 if (link_up != link_info->link_up)
5278                         bnxt_report_link(bp);
5279         } else {
5280                 /* always set link down if not required to update link state */
5281                 link_info->link_up = 0;
5282         }
5283         mutex_unlock(&bp->hwrm_cmd_lock);
5284
5285         if (!BNXT_SINGLE_PF(bp))
5286                 return 0;
5287
5288         diff = link_info->support_auto_speeds ^ link_info->advertising;
5289         if ((link_info->support_auto_speeds | diff) !=
5290             link_info->support_auto_speeds) {
5291                 /* An advertised speed is no longer supported, so we need to
5292                  * update the advertisement settings.  Caller holds RTNL
5293                  * so we can modify link settings.
5294                  */
5295                 link_info->advertising = link_info->support_auto_speeds;
5296                 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
5297                         bnxt_hwrm_set_link_setting(bp, true, false);
5298         }
5299         return 0;
5300 }
5301
5302 static void bnxt_get_port_module_status(struct bnxt *bp)
5303 {
5304         struct bnxt_link_info *link_info = &bp->link_info;
5305         struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
5306         u8 module_status;
5307
5308         if (bnxt_update_link(bp, true))
5309                 return;
5310
5311         module_status = link_info->module_status;
5312         switch (module_status) {
5313         case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
5314         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
5315         case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
5316                 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
5317                             bp->pf.port_id);
5318                 if (bp->hwrm_spec_code >= 0x10201) {
5319                         netdev_warn(bp->dev, "Module part number %s\n",
5320                                     resp->phy_vendor_partnumber);
5321                 }
5322                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
5323                         netdev_warn(bp->dev, "TX is disabled\n");
5324                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
5325                         netdev_warn(bp->dev, "SFP+ module is shutdown\n");
5326         }
5327 }
5328
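/* Fill in the pause fields of a PORT_PHY_CFG request.  With autonegotiated
 * flow control the auto_pause bits are set; otherwise force_pause is used
 * (and mirrored into auto_pause on HWRM 1.2.1 or newer).
 */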
5329 static void
5330 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
5331 {
5332         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
5333                 if (bp->hwrm_spec_code >= 0x10201)
5334                         req->auto_pause =
5335                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
5336                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
5337                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
5338                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
5339                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
5340                 req->enables |=
5341                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
5342         } else {
5343                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
5344                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
5345                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
5346                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
5347                 req->enables |=
5348                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
5349                 if (bp->hwrm_spec_code >= 0x10201) {
5350                         req->auto_pause = req->force_pause;
5351                         req->enables |= cpu_to_le32(
5352                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
5353                 }
5354         }
5355 }
5356
5357 static void bnxt_hwrm_set_link_common(struct bnxt *bp,
5358                                       struct hwrm_port_phy_cfg_input *req)
5359 {
5360         u8 autoneg = bp->link_info.autoneg;
5361         u16 fw_link_speed = bp->link_info.req_link_speed;
5362         u32 advertising = bp->link_info.advertising;
5363
5364         if (autoneg & BNXT_AUTONEG_SPEED) {
5365                 req->auto_mode |=
5366                         PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
5367
5368                 req->enables |= cpu_to_le32(
5369                         PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
5370                 req->auto_link_speed_mask = cpu_to_le16(advertising);
5371
5372                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
5373                 req->flags |=
5374                         cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
5375         } else {
5376                 req->force_link_speed = cpu_to_le16(fw_link_speed);
5377                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
5378         }
5379
5380         /* tell chimp that the setting takes effect immediately */
5381         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
5382 }
5383
5384 int bnxt_hwrm_set_pause(struct bnxt *bp)
5385 {
5386         struct hwrm_port_phy_cfg_input req = {0};
5387         int rc;
5388
5389         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
5390         bnxt_hwrm_set_pause_common(bp, &req);
5391
5392         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
5393             bp->link_info.force_link_chng)
5394                 bnxt_hwrm_set_link_common(bp, &req);
5395
5396         mutex_lock(&bp->hwrm_cmd_lock);
5397         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5398         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
5399                 /* since changing the pause setting doesn't trigger any link
5400                  * change event, the driver needs to update the current pause
5401                  * result upon successful return of the phy_cfg command
5402                  */
5403                 bp->link_info.pause =
5404                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
5405                 bp->link_info.auto_pause_setting = 0;
5406                 if (!bp->link_info.force_link_chng)
5407                         bnxt_report_link(bp);
5408         }
5409         bp->link_info.force_link_chng = false;
5410         mutex_unlock(&bp->hwrm_cmd_lock);
5411         return rc;
5412 }
5413
5414 static void bnxt_hwrm_set_eee(struct bnxt *bp,
5415                               struct hwrm_port_phy_cfg_input *req)
5416 {
5417         struct ethtool_eee *eee = &bp->eee;
5418
5419         if (eee->eee_enabled) {
5420                 u16 eee_speeds;
5421                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
5422
5423                 if (eee->tx_lpi_enabled)
5424                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
5425                 else
5426                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
5427
5428                 req->flags |= cpu_to_le32(flags);
5429                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
5430                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
5431                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
5432         } else {
5433                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
5434         }
5435 }
5436
5437 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
5438 {
5439         struct hwrm_port_phy_cfg_input req = {0};
5440
5441         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
5442         if (set_pause)
5443                 bnxt_hwrm_set_pause_common(bp, &req);
5444
5445         bnxt_hwrm_set_link_common(bp, &req);
5446
5447         if (set_eee)
5448                 bnxt_hwrm_set_eee(bp, &req);
5449         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5450 }
5451
5452 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
5453 {
5454         struct hwrm_port_phy_cfg_input req = {0};
5455
5456         if (!BNXT_SINGLE_PF(bp))
5457                 return 0;
5458
5459         if (pci_num_vf(bp->pdev))
5460                 return 0;
5461
5462         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
5463         req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN);
5464         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5465 }
5466
5467 static bool bnxt_eee_config_ok(struct bnxt *bp)
5468 {
5469         struct ethtool_eee *eee = &bp->eee;
5470         struct bnxt_link_info *link_info = &bp->link_info;
5471
5472         if (!(bp->flags & BNXT_FLAG_EEE_CAP))
5473                 return true;
5474
5475         if (eee->eee_enabled) {
5476                 u32 advertising =
5477                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
5478
5479                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
5480                         eee->eee_enabled = 0;
5481                         return false;
5482                 }
5483                 if (eee->advertised & ~advertising) {
5484                         eee->advertised = advertising & eee->supported;
5485                         return false;
5486                 }
5487         }
5488         return true;
5489 }
5490
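/* Compare the requested link settings (speed, duplex, pause, EEE) with the
 * current PHY state and issue a PORT_PHY_CFG update only when something has
 * actually changed.
 */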
5491 static int bnxt_update_phy_setting(struct bnxt *bp)
5492 {
5493         int rc;
5494         bool update_link = false;
5495         bool update_pause = false;
5496         bool update_eee = false;
5497         struct bnxt_link_info *link_info = &bp->link_info;
5498
5499         rc = bnxt_update_link(bp, true);
5500         if (rc) {
5501                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
5502                            rc);
5503                 return rc;
5504         }
5505         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
5506             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
5507             link_info->req_flow_ctrl)
5508                 update_pause = true;
5509         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
5510             link_info->force_pause_setting != link_info->req_flow_ctrl)
5511                 update_pause = true;
5512         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
5513                 if (BNXT_AUTO_MODE(link_info->auto_mode))
5514                         update_link = true;
5515                 if (link_info->req_link_speed != link_info->force_link_speed)
5516                         update_link = true;
5517                 if (link_info->req_duplex != link_info->duplex_setting)
5518                         update_link = true;
5519         } else {
5520                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
5521                         update_link = true;
5522                 if (link_info->advertising != link_info->auto_link_speeds)
5523                         update_link = true;
5524         }
5525
5526         if (!bnxt_eee_config_ok(bp))
5527                 update_eee = true;
5528
5529         if (update_link)
5530                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
5531         else if (update_pause)
5532                 rc = bnxt_hwrm_set_pause(bp);
5533         if (rc) {
5534                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
5535                            rc);
5536                 return rc;
5537         }
5538
5539         return rc;
5540 }
5541
5542 /* Common routine to pre-map a certain register block to a different GRC window.
5543  * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
5544  * in the PF and 3 windows in the VF can be customized to map different
5545  * register blocks.
5546  */
5547 static void bnxt_preset_reg_win(struct bnxt *bp)
5548 {
5549         if (BNXT_PF(bp)) {
5550                 /* CAG registers map to GRC window #4 */
5551                 writel(BNXT_CAG_REG_BASE,
5552                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
5553         }
5554 }
5555
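/* Core open path: set up the interrupt mode, allocate memory, NAPI and IRQs,
 * initialize the rings and firmware resources, optionally refresh the PHY
 * settings, then enable NAPI, interrupts and the TX queues and start the
 * periodic timer.
 */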
5556 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
5557 {
5558         int rc = 0;
5559
5560         bnxt_preset_reg_win(bp);
5561         netif_carrier_off(bp->dev);
5562         if (irq_re_init) {
5563                 rc = bnxt_setup_int_mode(bp);
5564                 if (rc) {
5565                         netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
5566                                    rc);
5567                         return rc;
5568                 }
5569         }
5570         if ((bp->flags & BNXT_FLAG_RFS) &&
5571             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
5572                 /* disable RFS if falling back to INTA */
5573                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
5574                 bp->flags &= ~BNXT_FLAG_RFS;
5575         }
5576
5577         rc = bnxt_alloc_mem(bp, irq_re_init);
5578         if (rc) {
5579                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
5580                 goto open_err_free_mem;
5581         }
5582
5583         if (irq_re_init) {
5584                 bnxt_init_napi(bp);
5585                 rc = bnxt_request_irq(bp);
5586                 if (rc) {
5587                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
5588                         goto open_err_irq;
5589                 }
5590         }
5591
5592         rc = bnxt_init_nic(bp, irq_re_init);
5593         if (rc) {
5594                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
5595                 goto open_err_irq;
5596         }
5597
5598         bnxt_enable_napi(bp);
5599
5600         if (link_re_init) {
5601                 mutex_lock(&bp->link_lock);
5602                 rc = bnxt_update_phy_setting(bp);
5603                 mutex_unlock(&bp->link_lock);
5604                 if (rc)
5605                         netdev_warn(bp->dev, "failed to update phy settings\n");
5606         }
5607
5608         if (irq_re_init)
5609                 udp_tunnel_get_rx_info(bp->dev);
5610
5611         set_bit(BNXT_STATE_OPEN, &bp->state);
5612         bnxt_enable_int(bp);
5613         /* Enable TX queues */
5614         bnxt_tx_enable(bp);
5615         mod_timer(&bp->timer, jiffies + bp->current_interval);
5616         /* Poll link status and check for SFP+ module status */
5617         bnxt_get_port_module_status(bp);
5618
5619         return 0;
5620
5621 open_err_irq:
5622         bnxt_del_napi(bp);
5623
5624 open_err_free_mem:
5625         bnxt_free_skbs(bp);
5626         bnxt_free_irq(bp);
5627         bnxt_free_mem(bp, true);
5628         return rc;
5629 }
5630
5631 /* rtnl_lock held */
5632 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
5633 {
5634         int rc = 0;
5635
5636         rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
5637         if (rc) {
5638                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
5639                 dev_close(bp->dev);
5640         }
5641         return rc;
5642 }
5643
5644 static int bnxt_open(struct net_device *dev)
5645 {
5646         struct bnxt *bp = netdev_priv(dev);
5647         int rc = 0;
5648
5649         if (!test_bit(BNXT_STATE_FN_RST_DONE, &bp->state)) {
5650                 rc = bnxt_hwrm_func_reset(bp);
5651                 if (rc) {
5652                         netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
5653                                    rc);
5654                         rc = -EBUSY;
5655                         return rc;
5656                 }
5657                 /* Do func_reset during the 1st PF open only to prevent killing
5658                  * the VFs when the PF is brought down and up.
5659                  */
5660                 if (BNXT_PF(bp))
5661                         set_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
5662         }
5663         return __bnxt_open_nic(bp, true, true);
5664 }
5665
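/* Flag interrupts as disabled (intr_sem) and, if the device is running,
 * mask them in hardware and call synchronize_irq() on every completion
 * ring vector so no handler is still executing on return.
 */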
5666 static void bnxt_disable_int_sync(struct bnxt *bp)
5667 {
5668         int i;
5669
5670         atomic_inc(&bp->intr_sem);
5671         if (!netif_running(bp->dev))
5672                 return;
5673
5674         bnxt_disable_int(bp);
5675         for (i = 0; i < bp->cp_nr_rings; i++)
5676                 synchronize_irq(bp->irq_tbl[i].vector);
5677 }
5678
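/* Tear-down counterpart of bnxt_open_nic().  Waits for any pending SR-IOV
 * configuration, stops the TX queues, waits for bnxt_sp_task() to get out
 * of the way, shuts down the rings via HWRM, then disables NAPI and
 * interrupts and frees SKBs, IRQs and ring memory as requested.
 */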
5679 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
5680 {
5681         int rc = 0;
5682
5683 #ifdef CONFIG_BNXT_SRIOV
5684         if (bp->sriov_cfg) {
5685                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
5686                                                       !bp->sriov_cfg,
5687                                                       BNXT_SRIOV_CFG_WAIT_TMO);
5688                 if (rc)
5689                         netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
5690         }
5691 #endif
5692         /* Change device state to avoid TX queue wake-ups */
5693         bnxt_tx_disable(bp);
5694
5695         clear_bit(BNXT_STATE_OPEN, &bp->state);
5696         smp_mb__after_atomic();
5697         while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
5698                 msleep(20);
5699
5700         /* Flush rings before disabling interrupts */
5701         bnxt_shutdown_nic(bp, irq_re_init);
5702
5703         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
5704
5705         bnxt_disable_napi(bp);
5706         bnxt_disable_int_sync(bp);
5707         del_timer_sync(&bp->timer);
5708         bnxt_free_skbs(bp);
5709
5710         if (irq_re_init) {
5711                 bnxt_free_irq(bp);
5712                 bnxt_del_napi(bp);
5713         }
5714         bnxt_free_mem(bp, irq_re_init);
5715         return rc;
5716 }
5717
5718 static int bnxt_close(struct net_device *dev)
5719 {
5720         struct bnxt *bp = netdev_priv(dev);
5721
5722         bnxt_close_nic(bp, true, true);
5723         bnxt_hwrm_shutdown_link(bp);
5724         return 0;
5725 }
5726
5727 /* rtnl_lock held */
5728 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5729 {
5730         switch (cmd) {
5731         case SIOCGMIIPHY:
5732                 /* fallthru */
5733         case SIOCGMIIREG: {
5734                 if (!netif_running(dev))
5735                         return -EAGAIN;
5736
5737                 return 0;
5738         }
5739
5740         case SIOCSMIIREG:
5741                 if (!netif_running(dev))
5742                         return -EAGAIN;
5743
5744                 return 0;
5745
5746         default:
5747                 /* do nothing */
5748                 break;
5749         }
5750         return -EOPNOTSUPP;
5751 }
5752
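/* ndo_get_stats64 handler.  Sums the per-completion-ring hardware counters
 * into @stats and, when port statistics are enabled, adds the port-level
 * error and collision counters.
 */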
5753 static struct rtnl_link_stats64 *
5754 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5755 {
5756         u32 i;
5757         struct bnxt *bp = netdev_priv(dev);
5758
5759         memset(stats, 0, sizeof(struct rtnl_link_stats64));
5760
5761         if (!bp->bnapi)
5762                 return stats;
5763
5764         /* TODO check if we need to synchronize with bnxt_close path */
5765         for (i = 0; i < bp->cp_nr_rings; i++) {
5766                 struct bnxt_napi *bnapi = bp->bnapi[i];
5767                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5768                 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
5769
5770                 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
5771                 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
5772                 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
5773
5774                 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
5775                 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
5776                 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
5777
5778                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
5779                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
5780                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
5781
5782                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
5783                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
5784                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
5785
5786                 stats->rx_missed_errors +=
5787                         le64_to_cpu(hw_stats->rx_discard_pkts);
5788
5789                 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
5790
5791                 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
5792         }
5793
5794         if (bp->flags & BNXT_FLAG_PORT_STATS) {
5795                 struct rx_port_stats *rx = bp->hw_rx_port_stats;
5796                 struct tx_port_stats *tx = bp->hw_tx_port_stats;
5797
5798                 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
5799                 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
5800                 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
5801                                           le64_to_cpu(rx->rx_ovrsz_frames) +
5802                                           le64_to_cpu(rx->rx_runt_frames);
5803                 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
5804                                    le64_to_cpu(rx->rx_jbr_frames);
5805                 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
5806                 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
5807                 stats->tx_errors = le64_to_cpu(tx->tx_err);
5808         }
5809
5810         return stats;
5811 }
5812
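/* Copy the netdev multicast list into the default VNIC's mc_list and
 * return true if anything changed.  If there are more addresses than
 * BNXT_MAX_MC_ADDRS, fall back to the ALL_MCAST RX mask instead.
 */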
5813 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
5814 {
5815         struct net_device *dev = bp->dev;
5816         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5817         struct netdev_hw_addr *ha;
5818         u8 *haddr;
5819         int mc_count = 0;
5820         bool update = false;
5821         int off = 0;
5822
5823         netdev_for_each_mc_addr(ha, dev) {
5824                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
5825                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
5826                         vnic->mc_list_count = 0;
5827                         return false;
5828                 }
5829                 haddr = ha->addr;
5830                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
5831                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
5832                         update = true;
5833                 }
5834                 off += ETH_ALEN;
5835                 mc_count++;
5836         }
5837         if (mc_count)
5838                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
5839
5840         if (mc_count != vnic->mc_list_count) {
5841                 vnic->mc_list_count = mc_count;
5842                 update = true;
5843         }
5844         return update;
5845 }
5846
5847 static bool bnxt_uc_list_updated(struct bnxt *bp)
5848 {
5849         struct net_device *dev = bp->dev;
5850         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5851         struct netdev_hw_addr *ha;
5852         int off = 0;
5853
5854         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
5855                 return true;
5856
5857         netdev_for_each_uc_addr(ha, dev) {
5858                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
5859                         return true;
5860
5861                 off += ETH_ALEN;
5862         }
5863         return false;
5864 }
5865
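/* ndo_set_rx_mode handler.  Recomputes the RX mask from the device flags
 * and address lists; any change is applied from bnxt_sp_task() because
 * this callback runs in atomic context and cannot issue HWRM commands.
 */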
5866 static void bnxt_set_rx_mode(struct net_device *dev)
5867 {
5868         struct bnxt *bp = netdev_priv(dev);
5869         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5870         u32 mask = vnic->rx_mask;
5871         bool mc_update = false;
5872         bool uc_update;
5873
5874         if (!netif_running(dev))
5875                 return;
5876
5877         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
5878                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
5879                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
5880
5881         if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
5882                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
5883
5884         uc_update = bnxt_uc_list_updated(bp);
5885
5886         if (dev->flags & IFF_ALLMULTI) {
5887                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
5888                 vnic->mc_list_count = 0;
5889         } else {
5890                 mc_update = bnxt_mc_list_updated(bp, &mask);
5891         }
5892
5893         if (mask != vnic->rx_mask || uc_update || mc_update) {
5894                 vnic->rx_mask = mask;
5895
5896                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
5897                 schedule_work(&bp->sp_task);
5898         }
5899 }
5900
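/* Called from bnxt_sp_task() to apply the RX mode computed above: frees
 * and re-adds the L2 unicast filters and programs the RX mask via HWRM,
 * falling back to promiscuous/ALL_MCAST mode when filter resources run
 * out.
 */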
5901 static int bnxt_cfg_rx_mode(struct bnxt *bp)
5902 {
5903         struct net_device *dev = bp->dev;
5904         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5905         struct netdev_hw_addr *ha;
5906         int i, off = 0, rc;
5907         bool uc_update;
5908
5909         netif_addr_lock_bh(dev);
5910         uc_update = bnxt_uc_list_updated(bp);
5911         netif_addr_unlock_bh(dev);
5912
5913         if (!uc_update)
5914                 goto skip_uc;
5915
5916         mutex_lock(&bp->hwrm_cmd_lock);
5917         for (i = 1; i < vnic->uc_filter_count; i++) {
5918                 struct hwrm_cfa_l2_filter_free_input req = {0};
5919
5920                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
5921                                        -1);
5922
5923                 req.l2_filter_id = vnic->fw_l2_filter_id[i];
5924
5925                 rc = _hwrm_send_message(bp, &req, sizeof(req),
5926                                         HWRM_CMD_TIMEOUT);
5927         }
5928         mutex_unlock(&bp->hwrm_cmd_lock);
5929
5930         vnic->uc_filter_count = 1;
5931
5932         netif_addr_lock_bh(dev);
5933         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
5934                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
5935         } else {
5936                 netdev_for_each_uc_addr(ha, dev) {
5937                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
5938                         off += ETH_ALEN;
5939                         vnic->uc_filter_count++;
5940                 }
5941         }
5942         netif_addr_unlock_bh(dev);
5943
5944         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
5945                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
5946                 if (rc) {
5947                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
5948                                    rc);
5949                         vnic->uc_filter_count = i;
5950                         return rc;
5951                 }
5952         }
5953
5954 skip_uc:
5955         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
5956         if (rc && vnic->mc_list_count) {
5957                 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
5958                             rc);
5959                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
5960                 vnic->mc_list_count = 0;
5961                 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
5962         }
5963         if (rc)
5964                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
5965                            rc);
5966
5967         return rc;
5968 }
5969
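/* aRFS needs one VNIC (and RSS context) per RX ring on top of the default
 * VNIC.  Return false on VFs, without MSI-X, or when the PF does not have
 * enough VNIC/RSS resources for the current RX ring count.
 */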
5970 static bool bnxt_rfs_capable(struct bnxt *bp)
5971 {
5972 #ifdef CONFIG_RFS_ACCEL
5973         struct bnxt_pf_info *pf = &bp->pf;
5974         int vnics;
5975
5976         if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP))
5977                 return false;
5978
5979         vnics = 1 + bp->rx_nr_rings;
5980         if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) {
5981                 netdev_warn(bp->dev,
5982                             "Not enough resources to support NTUPLE filters, only enough for up to %d rx rings\n",
5983                             min(pf->max_rsscos_ctxs - 1, pf->max_vnics - 1));
5984                 return false;
5985         }
5986
5987         return true;
5988 #else
5989         return false;
5990 #endif
5991 }
5992
5993 static netdev_features_t bnxt_fix_features(struct net_device *dev,
5994                                            netdev_features_t features)
5995 {
5996         struct bnxt *bp = netdev_priv(dev);
5997         netdev_features_t vlan_features;
5998
5999         if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
6000                 features &= ~NETIF_F_NTUPLE;
6001
6002         /* Both CTAG and STAG VLAN acceleration on the RX side have to be
6003          * turned on or off together.
6004          */
6005         vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX |
6006                                     NETIF_F_HW_VLAN_STAG_RX);
6007         if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX |
6008                               NETIF_F_HW_VLAN_STAG_RX)) {
6009                 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
6010                         features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
6011                                       NETIF_F_HW_VLAN_STAG_RX);
6012                 else if (vlan_features)
6013                         features |= NETIF_F_HW_VLAN_CTAG_RX |
6014                                     NETIF_F_HW_VLAN_STAG_RX;
6015         }
6016 #ifdef CONFIG_BNXT_SRIOV
6017         if (BNXT_VF(bp)) {
6018                 if (bp->vf.vlan) {
6019                         features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
6020                                       NETIF_F_HW_VLAN_STAG_RX);
6021                 }
6022         }
6023 #endif
6024         return features;
6025 }
6026
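/* ndo_set_features handler.  Translates the requested features into
 * BNXT_FLAG_* bits; TPA (LRO/GRO) toggles may only need the TPA and ring
 * parameters to be reprogrammed, while other changes force a close/open
 * cycle of the NIC.
 */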
6027 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
6028 {
6029         struct bnxt *bp = netdev_priv(dev);
6030         u32 flags = bp->flags;
6031         u32 changes;
6032         int rc = 0;
6033         bool re_init = false;
6034         bool update_tpa = false;
6035
6036         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
6037         if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
6038                 flags |= BNXT_FLAG_GRO;
6039         if (features & NETIF_F_LRO)
6040                 flags |= BNXT_FLAG_LRO;
6041
6042         if (features & NETIF_F_HW_VLAN_CTAG_RX)
6043                 flags |= BNXT_FLAG_STRIP_VLAN;
6044
6045         if (features & NETIF_F_NTUPLE)
6046                 flags |= BNXT_FLAG_RFS;
6047
6048         changes = flags ^ bp->flags;
6049         if (changes & BNXT_FLAG_TPA) {
6050                 update_tpa = true;
6051                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
6052                     (flags & BNXT_FLAG_TPA) == 0)
6053                         re_init = true;
6054         }
6055
6056         if (changes & ~BNXT_FLAG_TPA)
6057                 re_init = true;
6058
6059         if (flags != bp->flags) {
6060                 u32 old_flags = bp->flags;
6061
6062                 bp->flags = flags;
6063
6064                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
6065                         if (update_tpa)
6066                                 bnxt_set_ring_params(bp);
6067                         return rc;
6068                 }
6069
6070                 if (re_init) {
6071                         bnxt_close_nic(bp, false, false);
6072                         if (update_tpa)
6073                                 bnxt_set_ring_params(bp);
6074
6075                         return bnxt_open_nic(bp, false, false);
6076                 }
6077                 if (update_tpa) {
6078                         rc = bnxt_set_tpa(bp,
6079                                           (flags & BNXT_FLAG_TPA) ?
6080                                           true : false);
6081                         if (rc)
6082                                 bp->flags = old_flags;
6083                 }
6084         }
6085         return rc;
6086 }
6087
6088 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
6089 {
6090         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
6091         int i = bnapi->index;
6092
6093         if (!txr)
6094                 return;
6095
6096         netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
6097                     i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
6098                     txr->tx_cons);
6099 }
6100
6101 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
6102 {
6103         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
6104         int i = bnapi->index;
6105
6106         if (!rxr)
6107                 return;
6108
6109         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
6110                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
6111                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
6112                     rxr->rx_sw_agg_prod);
6113 }
6114
6115 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
6116 {
6117         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6118         int i = bnapi->index;
6119
6120         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
6121                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
6122 }
6123
6124 static void bnxt_dbg_dump_states(struct bnxt *bp)
6125 {
6126         int i;
6127         struct bnxt_napi *bnapi;
6128
6129         for (i = 0; i < bp->cp_nr_rings; i++) {
6130                 bnapi = bp->bnapi[i];
6131                 if (netif_msg_drv(bp)) {
6132                         bnxt_dump_tx_sw_state(bnapi);
6133                         bnxt_dump_rx_sw_state(bnapi);
6134                         bnxt_dump_cp_sw_state(bnapi);
6135                 }
6136         }
6137 }
6138
6139 static void bnxt_reset_task(struct bnxt *bp, bool silent)
6140 {
6141         if (!silent)
6142                 bnxt_dbg_dump_states(bp);
6143         if (netif_running(bp->dev)) {
6144                 bnxt_close_nic(bp, false, false);
6145                 bnxt_open_nic(bp, false, false);
6146         }
6147 }
6148
6149 static void bnxt_tx_timeout(struct net_device *dev)
6150 {
6151         struct bnxt *bp = netdev_priv(dev);
6152
6153         netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
6154         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
6155         schedule_work(&bp->sp_task);
6156 }
6157
6158 #ifdef CONFIG_NET_POLL_CONTROLLER
6159 static void bnxt_poll_controller(struct net_device *dev)
6160 {
6161         struct bnxt *bp = netdev_priv(dev);
6162         int i;
6163
6164         for (i = 0; i < bp->cp_nr_rings; i++) {
6165                 struct bnxt_irq *irq = &bp->irq_tbl[i];
6166
6167                 disable_irq(irq->vector);
6168                 irq->handler(irq->vector, bp->bnapi[i]);
6169                 enable_irq(irq->vector);
6170         }
6171 }
6172 #endif
6173
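/* Periodic timer (every bp->current_interval jiffies).  While the link is
 * up and port statistics are supported, kick bnxt_sp_task() to refresh the
 * port statistics.
 */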
6174 static void bnxt_timer(unsigned long data)
6175 {
6176         struct bnxt *bp = (struct bnxt *)data;
6177         struct net_device *dev = bp->dev;
6178
6179         if (!netif_running(dev))
6180                 return;
6181
6182         if (atomic_read(&bp->intr_sem) != 0)
6183                 goto bnxt_restart_timer;
6184
6185         if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) {
6186                 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
6187                 schedule_work(&bp->sp_task);
6188         }
6189 bnxt_restart_timer:
6190         mod_timer(&bp->timer, jiffies + bp->current_interval);
6191 }
6192
6193 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
6194 {
6195         /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
6196          * set.  If the device is being closed, bnxt_close() may be holding
6197          * rtnl_lock and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
6198          * must clear BNXT_STATE_IN_SP_TASK before taking rtnl_lock.
6199          */
6200         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6201         rtnl_lock();
6202 }
6203
6204 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
6205 {
6206         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6207         rtnl_unlock();
6208 }
6209
6210 /* Only called from bnxt_sp_task() */
6211 static void bnxt_reset(struct bnxt *bp, bool silent)
6212 {
6213         bnxt_rtnl_lock_sp(bp);
6214         if (test_bit(BNXT_STATE_OPEN, &bp->state))
6215                 bnxt_reset_task(bp, silent);
6216         bnxt_rtnl_unlock_sp(bp);
6217 }
6218
6219 static void bnxt_cfg_ntp_filters(struct bnxt *);
6220
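/* Slow-path workqueue handler.  Processes the deferred events flagged in
 * bp->sp_event that need process context (HWRM commands, rtnl), such as
 * RX mode updates, tunnel port add/delete, link changes and resets.
 */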
6221 static void bnxt_sp_task(struct work_struct *work)
6222 {
6223         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
6224
6225         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6226         smp_mb__after_atomic();
6227         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
6228                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6229                 return;
6230         }
6231
6232         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
6233                 bnxt_cfg_rx_mode(bp);
6234
6235         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
6236                 bnxt_cfg_ntp_filters(bp);
6237         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
6238                 bnxt_hwrm_exec_fwd_req(bp);
6239         if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
6240                 bnxt_hwrm_tunnel_dst_port_alloc(
6241                         bp, bp->vxlan_port,
6242                         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
6243         }
6244         if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
6245                 bnxt_hwrm_tunnel_dst_port_free(
6246                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
6247         }
6248         if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
6249                 bnxt_hwrm_tunnel_dst_port_alloc(
6250                         bp, bp->nge_port,
6251                         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6252         }
6253         if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
6254                 bnxt_hwrm_tunnel_dst_port_free(
6255                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6256         }
6257         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
6258                 bnxt_hwrm_port_qstats(bp);
6259
6260         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
6261                 int rc;
6262
6263                 mutex_lock(&bp->link_lock);
6264                 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
6265                                        &bp->sp_event))
6266                         bnxt_hwrm_phy_qcaps(bp);
6267
6268                 rc = bnxt_update_link(bp, true);
6269                 mutex_unlock(&bp->link_lock);
6270                 if (rc)
6271                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
6272                                    rc);
6273         }
6274         if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
6275                 mutex_lock(&bp->link_lock);
6276                 bnxt_get_port_module_status(bp);
6277                 mutex_unlock(&bp->link_lock);
6278         }
6279         /* The functions below will clear BNXT_STATE_IN_SP_TASK.  They
6280          * must be the last functions to be called before exiting.
6281          */
6282         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
6283                 bnxt_reset(bp, false);
6284
6285         if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
6286                 bnxt_reset(bp, true);
6287
6288         smp_mb__before_atomic();
6289         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6290 }
6291
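/* One-time PCI setup for the device: enable the device, map the register,
 * doorbell and bar4 BARs, set the DMA mask, and initialize default ring
 * sizes, interrupt coalescing parameters and the periodic timer.
 */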
6292 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
6293 {
6294         int rc;
6295         struct bnxt *bp = netdev_priv(dev);
6296
6297         SET_NETDEV_DEV(dev, &pdev->dev);
6298
6299         /* enable device (incl. PCI PM wakeup), and bus-mastering */
6300         rc = pci_enable_device(pdev);
6301         if (rc) {
6302                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
6303                 goto init_err;
6304         }
6305
6306         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6307                 dev_err(&pdev->dev,
6308                         "Cannot find PCI device base address, aborting\n");
6309                 rc = -ENODEV;
6310                 goto init_err_disable;
6311         }
6312
6313         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6314         if (rc) {
6315                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
6316                 goto init_err_disable;
6317         }
6318
6319         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
6320             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
6321                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
6322                 rc = -EIO;
6323                 goto init_err_release;
6324         }
6325
6326         pci_set_master(pdev);
6327
6328         bp->dev = dev;
6329         bp->pdev = pdev;
6330
6331         bp->bar0 = pci_ioremap_bar(pdev, 0);
6332         if (!bp->bar0) {
6333                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
6334                 rc = -ENOMEM;
6335                 goto init_err_release;
6336         }
6337
6338         bp->bar1 = pci_ioremap_bar(pdev, 2);
6339         if (!bp->bar1) {
6340                 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
6341                 rc = -ENOMEM;
6342                 goto init_err_release;
6343         }
6344
6345         bp->bar2 = pci_ioremap_bar(pdev, 4);
6346         if (!bp->bar2) {
6347                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
6348                 rc = -ENOMEM;
6349                 goto init_err_release;
6350         }
6351
6352         pci_enable_pcie_error_reporting(pdev);
6353
6354         INIT_WORK(&bp->sp_task, bnxt_sp_task);
6355
6356         spin_lock_init(&bp->ntp_fltr_lock);
6357
6358         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
6359         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
6360
6361         /* tick values in microseconds */
6362         bp->rx_coal_ticks = 12;
6363         bp->rx_coal_bufs = 30;
6364         bp->rx_coal_ticks_irq = 1;
6365         bp->rx_coal_bufs_irq = 2;
6366
6367         bp->tx_coal_ticks = 25;
6368         bp->tx_coal_bufs = 30;
6369         bp->tx_coal_ticks_irq = 2;
6370         bp->tx_coal_bufs_irq = 2;
6371
6372         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
6373
6374         init_timer(&bp->timer);
6375         bp->timer.data = (unsigned long)bp;
6376         bp->timer.function = bnxt_timer;
6377         bp->current_interval = BNXT_TIMER_INTERVAL;
6378
6379         clear_bit(BNXT_STATE_OPEN, &bp->state);
6380
6381         return 0;
6382
6383 init_err_release:
6384         if (bp->bar2) {
6385                 pci_iounmap(pdev, bp->bar2);
6386                 bp->bar2 = NULL;
6387         }
6388
6389         if (bp->bar1) {
6390                 pci_iounmap(pdev, bp->bar1);
6391                 bp->bar1 = NULL;
6392         }
6393
6394         if (bp->bar0) {
6395                 pci_iounmap(pdev, bp->bar0);
6396                 bp->bar0 = NULL;
6397         }
6398
6399         pci_release_regions(pdev);
6400
6401 init_err_disable:
6402         pci_disable_device(pdev);
6403
6404 init_err:
6405         return rc;
6406 }
6407
6408 /* rtnl_lock held */
6409 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
6410 {
6411         struct sockaddr *addr = p;
6412         struct bnxt *bp = netdev_priv(dev);
6413         int rc = 0;
6414
6415         if (!is_valid_ether_addr(addr->sa_data))
6416                 return -EADDRNOTAVAIL;
6417
6418         rc = bnxt_approve_mac(bp, addr->sa_data);
6419         if (rc)
6420                 return rc;
6421
6422         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
6423                 return 0;
6424
6425         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6426         if (netif_running(dev)) {
6427                 bnxt_close_nic(bp, false, false);
6428                 rc = bnxt_open_nic(bp, false, false);
6429         }
6430
6431         return rc;
6432 }
6433
6434 /* rtnl_lock held */
6435 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
6436 {
6437         struct bnxt *bp = netdev_priv(dev);
6438
6439         if (new_mtu < 60 || new_mtu > 9500)
6440                 return -EINVAL;
6441
6442         if (netif_running(dev))
6443                 bnxt_close_nic(bp, true, false);
6444
6445         dev->mtu = new_mtu;
6446         bnxt_set_ring_params(bp);
6447
6448         if (netif_running(dev))
6449                 return bnxt_open_nic(bp, true, false);
6450
6451         return 0;
6452 }
6453
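/* ndo_setup_tc handler (MQPRIO only).  Re-partitions the TX rings into the
 * requested number of traffic classes, closing and reopening the NIC when
 * it is running so the hardware resources can be re-allocated.
 */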
6454 static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
6455                          struct tc_to_netdev *ntc)
6456 {
6457         struct bnxt *bp = netdev_priv(dev);
6458         bool sh = false;
6459         u8 tc;
6460
6461         if (ntc->type != TC_SETUP_MQPRIO)
6462                 return -EINVAL;
6463
6464         tc = ntc->tc;
6465
6466         if (tc > bp->max_tc) {
6467                 netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
6468                            tc, bp->max_tc);
6469                 return -EINVAL;
6470         }
6471
6472         if (netdev_get_num_tc(dev) == tc)
6473                 return 0;
6474
6475         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6476                 sh = true;
6477
6478         if (tc) {
6479                 int max_rx_rings, max_tx_rings, rc;
6480
6481                 rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
6482                 if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings)
6483                         return -ENOMEM;
6484         }
6485
6486         /* Need to close the device and do hw resource re-allocation */
6487         if (netif_running(bp->dev))
6488                 bnxt_close_nic(bp, true, false);
6489
6490         if (tc) {
6491                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
6492                 netdev_set_num_tc(dev, tc);
6493         } else {
6494                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
6495                 netdev_reset_tc(dev);
6496         }
6497         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
6498                                bp->tx_nr_rings + bp->rx_nr_rings;
6499         bp->num_stat_ctxs = bp->cp_nr_rings;
6500
6501         if (netif_running(bp->dev))
6502                 return bnxt_open_nic(bp, true, false);
6503
6504         return 0;
6505 }
6506
6507 #ifdef CONFIG_RFS_ACCEL
6508 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
6509                             struct bnxt_ntuple_filter *f2)
6510 {
6511         struct flow_keys *keys1 = &f1->fkeys;
6512         struct flow_keys *keys2 = &f2->fkeys;
6513
6514         if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
6515             keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
6516             keys1->ports.ports == keys2->ports.ports &&
6517             keys1->basic.ip_proto == keys2->basic.ip_proto &&
6518             keys1->basic.n_proto == keys2->basic.n_proto &&
6519             ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
6520             ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
6521                 return true;
6522
6523         return false;
6524 }
6525
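/* ndo_rx_flow_steer handler (aRFS).  Dissects the flow, matches it against
 * a unicast L2 filter, checks the ntuple hash table for a duplicate, and
 * if it is new allocates a sw_id from the bitmap and defers the HWRM
 * filter programming to bnxt_sp_task().  Returns the new filter's ID,
 * 0 for an already-tracked flow, or a negative error.
 */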
6526 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
6527                               u16 rxq_index, u32 flow_id)
6528 {
6529         struct bnxt *bp = netdev_priv(dev);
6530         struct bnxt_ntuple_filter *fltr, *new_fltr;
6531         struct flow_keys *fkeys;
6532         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
6533         int rc = 0, idx, bit_id, l2_idx = 0;
6534         struct hlist_head *head;
6535
6536         if (skb->encapsulation)
6537                 return -EPROTONOSUPPORT;
6538
6539         if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
6540                 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6541                 int off = 0, j;
6542
6543                 netif_addr_lock_bh(dev);
6544                 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
6545                         if (ether_addr_equal(eth->h_dest,
6546                                              vnic->uc_list + off)) {
6547                                 l2_idx = j + 1;
6548                                 break;
6549                         }
6550                 }
6551                 netif_addr_unlock_bh(dev);
6552                 if (!l2_idx)
6553                         return -EINVAL;
6554         }
6555         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
6556         if (!new_fltr)
6557                 return -ENOMEM;
6558
6559         fkeys = &new_fltr->fkeys;
6560         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
6561                 rc = -EPROTONOSUPPORT;
6562                 goto err_free;
6563         }
6564
6565         if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
6566             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
6567              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
6568                 rc = -EPROTONOSUPPORT;
6569                 goto err_free;
6570         }
6571
6572         memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
6573         memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
6574
6575         idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
6576         head = &bp->ntp_fltr_hash_tbl[idx];
6577         rcu_read_lock();
6578         hlist_for_each_entry_rcu(fltr, head, hash) {
6579                 if (bnxt_fltr_match(fltr, new_fltr)) {
6580                         rcu_read_unlock();
6581                         rc = 0;
6582                         goto err_free;
6583                 }
6584         }
6585         rcu_read_unlock();
6586
6587         spin_lock_bh(&bp->ntp_fltr_lock);
6588         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
6589                                          BNXT_NTP_FLTR_MAX_FLTR, 0);
6590         if (bit_id < 0) {
6591                 spin_unlock_bh(&bp->ntp_fltr_lock);
6592                 rc = -ENOMEM;
6593                 goto err_free;
6594         }
6595
6596         new_fltr->sw_id = (u16)bit_id;
6597         new_fltr->flow_id = flow_id;
6598         new_fltr->l2_fltr_idx = l2_idx;
6599         new_fltr->rxq = rxq_index;
6600         hlist_add_head_rcu(&new_fltr->hash, head);
6601         bp->ntp_fltr_count++;
6602         spin_unlock_bh(&bp->ntp_fltr_lock);
6603
6604         set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
6605         schedule_work(&bp->sp_task);
6606
6607         return new_fltr->sw_id;
6608
6609 err_free:
6610         kfree(new_fltr);
6611         return rc;
6612 }
6613
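/* Called from bnxt_sp_task() to service the ntuple filter table: programs
 * newly steered flows into the hardware and frees entries that RPS reports
 * as expired.
 */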
6614 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
6615 {
6616         int i;
6617
6618         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
6619                 struct hlist_head *head;
6620                 struct hlist_node *tmp;
6621                 struct bnxt_ntuple_filter *fltr;
6622                 int rc;
6623
6624                 head = &bp->ntp_fltr_hash_tbl[i];
6625                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
6626                         bool del = false;
6627
6628                         if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
6629                                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
6630                                                         fltr->flow_id,
6631                                                         fltr->sw_id)) {
6632                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
6633                                                                          fltr);
6634                                         del = true;
6635                                 }
6636                         } else {
6637                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
6638                                                                        fltr);
6639                                 if (rc)
6640                                         del = true;
6641                                 else
6642                                         set_bit(BNXT_FLTR_VALID, &fltr->state);
6643                         }
6644
6645                         if (del) {
6646                                 spin_lock_bh(&bp->ntp_fltr_lock);
6647                                 hlist_del_rcu(&fltr->hash);
6648                                 bp->ntp_fltr_count--;
6649                                 spin_unlock_bh(&bp->ntp_fltr_lock);
6650                                 synchronize_rcu();
6651                                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
6652                                 kfree(fltr);
6653                         }
6654                 }
6655         }
6656         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
6657                 netdev_info(bp->dev, "Received PF driver unload event!\n");
6658 }
6659
6660 #else
6661
6662 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
6663 {
6664 }
6665
6666 #endif /* CONFIG_RFS_ACCEL */
6667
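/* ndo_udp_tunnel_add handler.  The driver tracks a single VXLAN and a
 * single GENEVE destination port, each with a reference count; the HWRM
 * tunnel port allocation is deferred to bnxt_sp_task().
 */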
6668 static void bnxt_udp_tunnel_add(struct net_device *dev,
6669                                 struct udp_tunnel_info *ti)
6670 {
6671         struct bnxt *bp = netdev_priv(dev);
6672
6673         if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
6674                 return;
6675
6676         if (!netif_running(dev))
6677                 return;
6678
6679         switch (ti->type) {
6680         case UDP_TUNNEL_TYPE_VXLAN:
6681                 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
6682                         return;
6683
6684                 bp->vxlan_port_cnt++;
6685                 if (bp->vxlan_port_cnt == 1) {
6686                         bp->vxlan_port = ti->port;
6687                         set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
6688                         schedule_work(&bp->sp_task);
6689                 }
6690                 break;
6691         case UDP_TUNNEL_TYPE_GENEVE:
6692                 if (bp->nge_port_cnt && bp->nge_port != ti->port)
6693                         return;
6694
6695                 bp->nge_port_cnt++;
6696                 if (bp->nge_port_cnt == 1) {
6697                         bp->nge_port = ti->port;
6698                         set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
6699                 }
6700                 break;
6701         default:
6702                 return;
6703         }
6704
6705         schedule_work(&bp->sp_task);
6706 }
6707
6708 static void bnxt_udp_tunnel_del(struct net_device *dev,
6709                                 struct udp_tunnel_info *ti)
6710 {
6711         struct bnxt *bp = netdev_priv(dev);
6712
6713         if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
6714                 return;
6715
6716         if (!netif_running(dev))
6717                 return;
6718
6719         switch (ti->type) {
6720         case UDP_TUNNEL_TYPE_VXLAN:
6721                 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
6722                         return;
6723                 bp->vxlan_port_cnt--;
6724
6725                 if (bp->vxlan_port_cnt != 0)
6726                         return;
6727
6728                 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
6729                 break;
6730         case UDP_TUNNEL_TYPE_GENEVE:
6731                 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
6732                         return;
6733                 bp->nge_port_cnt--;
6734
6735                 if (bp->nge_port_cnt != 0)
6736                         return;
6737
6738                 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
6739                 break;
6740         default:
6741                 return;
6742         }
6743
6744         schedule_work(&bp->sp_task);
6745 }
6746
6747 static const struct net_device_ops bnxt_netdev_ops = {
6748         .ndo_open               = bnxt_open,
6749         .ndo_start_xmit         = bnxt_start_xmit,
6750         .ndo_stop               = bnxt_close,
6751         .ndo_get_stats64        = bnxt_get_stats64,
6752         .ndo_set_rx_mode        = bnxt_set_rx_mode,
6753         .ndo_do_ioctl           = bnxt_ioctl,
6754         .ndo_validate_addr      = eth_validate_addr,
6755         .ndo_set_mac_address    = bnxt_change_mac_addr,
6756         .ndo_change_mtu         = bnxt_change_mtu,
6757         .ndo_fix_features       = bnxt_fix_features,
6758         .ndo_set_features       = bnxt_set_features,
6759         .ndo_tx_timeout         = bnxt_tx_timeout,
6760 #ifdef CONFIG_BNXT_SRIOV
6761         .ndo_get_vf_config      = bnxt_get_vf_config,
6762         .ndo_set_vf_mac         = bnxt_set_vf_mac,
6763         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
6764         .ndo_set_vf_rate        = bnxt_set_vf_bw,
6765         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
6766         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
6767 #endif
6768 #ifdef CONFIG_NET_POLL_CONTROLLER
6769         .ndo_poll_controller    = bnxt_poll_controller,
6770 #endif
6771         .ndo_setup_tc           = bnxt_setup_tc,
6772 #ifdef CONFIG_RFS_ACCEL
6773         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
6774 #endif
6775         .ndo_udp_tunnel_add     = bnxt_udp_tunnel_add,
6776         .ndo_udp_tunnel_del     = bnxt_udp_tunnel_del,
6777 #ifdef CONFIG_NET_RX_BUSY_POLL
6778         .ndo_busy_poll          = bnxt_busy_poll,
6779 #endif
6780 };
6781
6782 static void bnxt_remove_one(struct pci_dev *pdev)
6783 {
6784         struct net_device *dev = pci_get_drvdata(pdev);
6785         struct bnxt *bp = netdev_priv(dev);
6786
6787         if (BNXT_PF(bp))
6788                 bnxt_sriov_disable(bp);
6789
6790         pci_disable_pcie_error_reporting(pdev);
6791         unregister_netdev(dev);
6792         cancel_work_sync(&bp->sp_task);
6793         bp->sp_event = 0;
6794
6795         bnxt_hwrm_func_drv_unrgtr(bp);
6796         bnxt_free_hwrm_resources(bp);
6797         pci_iounmap(pdev, bp->bar2);
6798         pci_iounmap(pdev, bp->bar1);
6799         pci_iounmap(pdev, bp->bar0);
6800         free_netdev(dev);
6801
6802         pci_release_regions(pdev);
6803         pci_disable_device(pdev);
6804 }
6805
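/* Probe-time PHY setup: query the PHY capabilities and the current link
 * state through HWRM, then seed the driver's link/ethtool settings from
 * the NVM defaults (autoneg mode, advertised speeds and flow control).
 */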
6806 static int bnxt_probe_phy(struct bnxt *bp)
6807 {
6808         int rc = 0;
6809         struct bnxt_link_info *link_info = &bp->link_info;
6810
6811         rc = bnxt_hwrm_phy_qcaps(bp);
6812         if (rc) {
6813                 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
6814                            rc);
6815                 return rc;
6816         }
6817         mutex_init(&bp->link_lock);
6818
6819         rc = bnxt_update_link(bp, false);
6820         if (rc) {
6821                 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
6822                            rc);
6823                 return rc;
6824         }
6825
6826         /* Older firmware does not have supported_auto_speeds, so assume
6827          * that all supported speeds can be autonegotiated.
6828          */
6829         if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
6830                 link_info->support_auto_speeds = link_info->support_speeds;
6831
6832         /* Initialize the ethtool settings copy with NVM settings */
6833         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
6834                 link_info->autoneg = BNXT_AUTONEG_SPEED;
6835                 if (bp->hwrm_spec_code >= 0x10201) {
6836                         if (link_info->auto_pause_setting &
6837                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
6838                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
6839                 } else {
6840                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
6841                 }
6842                 link_info->advertising = link_info->auto_link_speeds;
6843         } else {
6844                 link_info->req_link_speed = link_info->force_link_speed;
6845                 link_info->req_duplex = link_info->duplex_setting;
6846         }
6847         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
6848                 link_info->req_flow_ctrl =
6849                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
6850         else
6851                 link_info->req_flow_ctrl = link_info->force_pause_setting;
6852         return rc;
6853 }
6854
6855 static int bnxt_get_max_irq(struct pci_dev *pdev)
6856 {
6857         u16 ctrl;
6858
6859         if (!pdev->msix_cap)
6860                 return 1;
6861
6862         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
6863         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
6864 }
6865
6866 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
6867                                 int *max_cp)
6868 {
6869         int max_ring_grps = 0;
6870
6871 #ifdef CONFIG_BNXT_SRIOV
6872         if (!BNXT_PF(bp)) {
6873                 *max_tx = bp->vf.max_tx_rings;
6874                 *max_rx = bp->vf.max_rx_rings;
6875                 *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
6876                 *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
6877                 max_ring_grps = bp->vf.max_hw_ring_grps;
6878         } else
6879 #endif
6880         {
6881                 *max_tx = bp->pf.max_tx_rings;
6882                 *max_rx = bp->pf.max_rx_rings;
6883                 *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
6884                 *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
6885                 max_ring_grps = bp->pf.max_hw_ring_grps;
6886         }
6887         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
6888                 *max_cp -= 1;
6889                 *max_rx -= 2;
6890         }
6891         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6892                 *max_rx >>= 1;
6893         *max_rx = min_t(int, *max_rx, max_ring_grps);
6894 }
6895
6896 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
6897 {
6898         int rx, tx, cp;
6899
6900         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
6901         *max_rx = rx;
6902         *max_tx = tx;
6903         if (!rx || !tx || !cp)
6904                 return -ENOMEM;
6905
6906         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
6907 }
6908
6909 static int bnxt_set_dflt_rings(struct bnxt *bp)
6910 {
6911         int dflt_rings, max_rx_rings, max_tx_rings, rc;
6912         bool sh = true;
6913
6914         if (sh)
6915                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
6916         dflt_rings = netif_get_num_default_rss_queues();
6917         rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
6918         if (rc)
6919                 return rc;
6920         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
6921         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
6922         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
6923         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
6924                                bp->tx_nr_rings + bp->rx_nr_rings;
6925         bp->num_stat_ctxs = bp->cp_nr_rings;
6926         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
6927                 bp->rx_nr_rings++;
6928                 bp->cp_nr_rings++;
6929         }
6930         return rc;
6931 }
6932
6933 static void bnxt_parse_log_pcie_link(struct bnxt *bp)
6934 {
6935         enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
6936         enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
6937
6938         if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
6939             speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
6940                 netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
6941         else
6942                 netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
6943                             speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
6944                             speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
6945                             speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
6946                             "Unknown", width);
6947 }
6948
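/* PCI probe entry point.  Allocates the netdev, maps the BARs, brings up
 * the HWRM channel, queries firmware/function capabilities, configures
 * default features and rings, probes the PHY and registers the netdev.
 */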
6949 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6950 {
6951         static int version_printed;
6952         struct net_device *dev;
6953         struct bnxt *bp;
6954         int rc, max_irqs;
6955
6956         if (pdev->device == 0x16cd && pci_is_bridge(pdev))
6957                 return -ENODEV;
6958
6959         if (version_printed++ == 0)
6960                 pr_info("%s", version);
6961
6962         max_irqs = bnxt_get_max_irq(pdev);
6963         dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
6964         if (!dev)
6965                 return -ENOMEM;
6966
6967         bp = netdev_priv(dev);
6968
6969         if (bnxt_vf_pciid(ent->driver_data))
6970                 bp->flags |= BNXT_FLAG_VF;
6971
6972         if (pdev->msix_cap)
6973                 bp->flags |= BNXT_FLAG_MSIX_CAP;
6974
6975         rc = bnxt_init_board(pdev, dev);
6976         if (rc < 0)
6977                 goto init_err_free;
6978
6979         dev->netdev_ops = &bnxt_netdev_ops;
6980         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
6981         dev->ethtool_ops = &bnxt_ethtool_ops;
6982
6983         pci_set_drvdata(pdev, dev);
6984
6985         rc = bnxt_alloc_hwrm_resources(bp);
6986         if (rc)
6987                 goto init_err;
6988
6989         mutex_init(&bp->hwrm_cmd_lock);
6990         rc = bnxt_hwrm_ver_get(bp);
6991         if (rc)
6992                 goto init_err;
6993
6994         bnxt_hwrm_fw_set_time(bp);
6995
6996         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
6997                            NETIF_F_TSO | NETIF_F_TSO6 |
6998                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
6999                            NETIF_F_GSO_IPXIP4 |
7000                            NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
7001                            NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
7002                            NETIF_F_RXCSUM | NETIF_F_GRO;
7003
7004         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
7005                 dev->hw_features |= NETIF_F_LRO;
7006
7007         dev->hw_enc_features =
7008                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
7009                         NETIF_F_TSO | NETIF_F_TSO6 |
7010                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
7011                         NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
7012                         NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
7013         dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
7014                                     NETIF_F_GSO_GRE_CSUM;
7015         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
7016         dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
7017                             NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
7018         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
7019         dev->priv_flags |= IFF_UNICAST_FLT;
7020
7021 #ifdef CONFIG_BNXT_SRIOV
7022         init_waitqueue_head(&bp->sriov_cfg_wait);
7023 #endif
7024         bp->gro_func = bnxt_gro_func_5730x;
7025         if (BNXT_CHIP_NUM_57X1X(bp->chip_num))
7026                 bp->gro_func = bnxt_gro_func_5731x;
7027
7028         rc = bnxt_hwrm_func_drv_rgtr(bp);
7029         if (rc)
7030                 goto init_err;
7031
7032         /* Get the MAX capabilities for this function */
7033         rc = bnxt_hwrm_func_qcaps(bp);
7034         if (rc) {
7035                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
7036                            rc);
7037                 rc = -1;
7038                 goto init_err;
7039         }
7040
7041         rc = bnxt_hwrm_queue_qportcfg(bp);
7042         if (rc) {
7043                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
7044                            rc);
7045                 rc = -1;
7046                 goto init_err;
7047         }
7048
7049         bnxt_hwrm_func_qcfg(bp);
7050
7051         bnxt_set_tpa_flags(bp);
7052         bnxt_set_ring_params(bp);
7053         if (BNXT_PF(bp))
7054                 bp->pf.max_irqs = max_irqs;
7055 #if defined(CONFIG_BNXT_SRIOV)
7056         else
7057                 bp->vf.max_irqs = max_irqs;
7058 #endif
7059         bnxt_set_dflt_rings(bp);
7060
7061         if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7062                 dev->hw_features |= NETIF_F_NTUPLE;
7063                 if (bnxt_rfs_capable(bp)) {
7064                         bp->flags |= BNXT_FLAG_RFS;
7065                         dev->features |= NETIF_F_NTUPLE;
7066                 }
7067         }
7068
7069         if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
7070                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
7071
7072         rc = bnxt_probe_phy(bp);
7073         if (rc)
7074                 goto init_err;
7075
7076         rc = register_netdev(dev);
7077         if (rc)
7078                 goto init_err;
7079
7080         netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
7081                     board_info[ent->driver_data].name,
7082                     (long)pci_resource_start(pdev, 0), dev->dev_addr);
7083
7084         bnxt_parse_log_pcie_link(bp);
7085
7086         pci_save_state(pdev);
7087         return 0;
7088
7089 init_err:
7090         pci_iounmap(pdev, bp->bar0);
7091         pci_release_regions(pdev);
7092         pci_disable_device(pdev);
7093
7094 init_err_free:
7095         free_netdev(dev);
7096         return rc;
7097 }
7098
7099 /**
7100  * bnxt_io_error_detected - called when PCI error is detected
7101  * @pdev: Pointer to PCI device
7102  * @state: The current PCI connection state
7103  *
7104  * This function is called after a PCI bus error affecting
7105  * this device has been detected.
7106  */
7107 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
7108                                                pci_channel_state_t state)
7109 {
7110         struct net_device *netdev = pci_get_drvdata(pdev);
7111         struct bnxt *bp = netdev_priv(netdev);
7112
7113         netdev_info(netdev, "PCI I/O error detected\n");
7114
7115         rtnl_lock();
7116         netif_device_detach(netdev);
7117
7118         if (state == pci_channel_io_perm_failure) {
7119                 rtnl_unlock();
7120                 return PCI_ERS_RESULT_DISCONNECT;
7121         }
7122
7123         if (netif_running(netdev))
7124                 bnxt_close(netdev);
7125
7126         /* So that func_reset will be done during slot_reset */
7127         clear_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
7128         pci_disable_device(pdev);
7129         rtnl_unlock();
7130
7131         /* Request a slot reset. */
7132         return PCI_ERS_RESULT_NEED_RESET;
7133 }
7134
7135 /**
7136  * bnxt_io_slot_reset - called after the PCI bus has been reset.
7137  * @pdev: Pointer to PCI device
7138  *
7139  * Restart the card from scratch, as if from a cold-boot.
7140  * At this point, the card has experienced a hard reset,
7141  * followed by fixups by BIOS, and has its config space
7142  * set up identically to what it was at cold boot.
7143  */
7144 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
7145 {
7146         struct net_device *netdev = pci_get_drvdata(pdev);
7147         struct bnxt *bp = netdev_priv(netdev);
7148         int err = 0;
7149         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
7150
7151         netdev_info(bp->dev, "PCI Slot Reset\n");
7152
7153         rtnl_lock();
7154
7155         if (pci_enable_device(pdev)) {
7156                 dev_err(&pdev->dev,
7157                         "Cannot re-enable PCI device after reset.\n");
7158         } else {
7159                 pci_set_master(pdev);
7160                 pci_restore_state(pdev);
7161                 pci_save_state(pdev);
7162
7163                 if (netif_running(netdev))
7164                         err = bnxt_open(netdev);
7165
7166                 if (!err)
7167                         result = PCI_ERS_RESULT_RECOVERED;
7168         }
7169
7170         if (result != PCI_ERS_RESULT_RECOVERED) {
7171                 if (netif_running(netdev))
7172                         dev_close(netdev);
7173                 pci_disable_device(pdev);
7174         }
7175
7176         rtnl_unlock();
7177
7178         err = pci_cleanup_aer_uncorrect_error_status(pdev);
7179         if (err) {
7180                 dev_err(&pdev->dev,
7181                         "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
7182                          err); /* non-fatal, continue */
7183         }
7184
7185         return result;
7186 }
7187
7188 /**
7189  * bnxt_io_resume - called when traffic can start flowing again.
7190  * @pdev: Pointer to PCI device
7191  *
7192  * This callback is called when the error recovery driver tells
7193  * us that it's OK to resume normal operation.
7194  */
7195 static void bnxt_io_resume(struct pci_dev *pdev)
7196 {
7197         struct net_device *netdev = pci_get_drvdata(pdev);
7198
7199         rtnl_lock();
7200
7201         netif_device_attach(netdev);
7202
7203         rtnl_unlock();
7204 }
7205
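/* PCI/AER error recovery callbacks.  The PCI error recovery core invokes
 * .error_detected first, then .slot_reset after the slot has been reset
 * (since .error_detected returns PCI_ERS_RESULT_NEED_RESET), and finally
 * .resume once normal traffic may flow again.
 */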
7206 static const struct pci_error_handlers bnxt_err_handler = {
7207         .error_detected = bnxt_io_error_detected,
7208         .slot_reset     = bnxt_io_slot_reset,
7209         .resume         = bnxt_io_resume
7210 };
7211
7212 static struct pci_driver bnxt_pci_driver = {
7213         .name           = DRV_MODULE_NAME,
7214         .id_table       = bnxt_pci_tbl,
7215         .probe          = bnxt_init_one,
7216         .remove         = bnxt_remove_one,
7217         .err_handler    = &bnxt_err_handler,
7218 #if defined(CONFIG_BNXT_SRIOV)
7219         .sriov_configure = bnxt_sriov_configure,
7220 #endif
7221 };
7222
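/* module_pci_driver() generates the module init/exit boilerplate that
 * registers and unregisters bnxt_pci_driver with the PCI core.
 */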
7223 module_pci_driver(bnxt_pci_driver);