GNU Linux-libre 4.19.245-gnu1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
1 /* bnx2x_cmn.c: QLogic Everest network driver.
2  *
3  * Copyright (c) 2007-2013 Broadcom Corporation
4  * Copyright (c) 2014 QLogic Corporation
5  * All rights reserved
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation.
10  *
11  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12  * Written by: Eliezer Tamir
13  * Based on code from Michael Chan's bnx2 driver
14  * UDP CSUM errata workaround by Arik Gendelman
15  * Slowpath and fastpath rework by Vladislav Zolotarov
16  * Statistics and Link management by Yitchak Gertner
17  *
18  */
19
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/etherdevice.h>
23 #include <linux/if_vlan.h>
24 #include <linux/interrupt.h>
25 #include <linux/ip.h>
26 #include <linux/crash_dump.h>
27 #include <net/tcp.h>
28 #include <net/ipv6.h>
29 #include <net/ip6_checksum.h>
30 #include <net/busy_poll.h>
31 #include <linux/prefetch.h>
32 #include "bnx2x_cmn.h"
33 #include "bnx2x_init.h"
34 #include "bnx2x_sp.h"
35
36 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
37 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
38 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
39 static int bnx2x_poll(struct napi_struct *napi, int budget);
40
41 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
42 {
43         int i;
44
45         /* Add NAPI objects */
46         for_each_rx_queue_cnic(bp, i) {
47                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
48                                bnx2x_poll, NAPI_POLL_WEIGHT);
49         }
50 }
51
52 static void bnx2x_add_all_napi(struct bnx2x *bp)
53 {
54         int i;
55
56         /* Add NAPI objects */
57         for_each_eth_queue(bp, i) {
58                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
59                                bnx2x_poll, NAPI_POLL_WEIGHT);
60         }
61 }
62
63 static int bnx2x_calc_num_queues(struct bnx2x *bp)
64 {
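        /* bnx2x_num_queues is set from the driver's module parameter (0 by
         * default); the GNU "?:" shorthand falls back to the stack's default
         * RSS queue count when no explicit value was given.
         */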
65         int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
66
67         /* Reduce memory usage in kdump environment by using only one queue */
68         if (is_kdump_kernel())
69                 nq = 1;
70
71         nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
72         return nq;
73 }
74
75 /**
76  * bnx2x_move_fp - move content of the fastpath structure.
77  *
78  * @bp:         driver handle
79  * @from:       source FP index
80  * @to:         destination FP index
81  *
82  * Makes sure the contents of bp->fp[to].napi are kept
83  * intact. This is done by first copying the napi struct from
84  * the target to the source, and then memcpy'ing the entire
85  * source onto the target. Updates txdata pointers and related
86  * content.
87  */
88 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
89 {
90         struct bnx2x_fastpath *from_fp = &bp->fp[from];
91         struct bnx2x_fastpath *to_fp = &bp->fp[to];
92         struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
93         struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
94         struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
95         struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
96         int old_max_eth_txqs, new_max_eth_txqs;
97         int old_txdata_index = 0, new_txdata_index = 0;
98         struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
99
100         /* Copy the NAPI object as it has been already initialized */
101         from_fp->napi = to_fp->napi;
102
103         /* Move bnx2x_fastpath contents */
104         memcpy(to_fp, from_fp, sizeof(*to_fp));
105         to_fp->index = to;
106
107         /* Retain the tpa_info of the original `to' version as we don't want
108          * 2 FPs to contain the same tpa_info pointer.
109          */
110         to_fp->tpa_info = old_tpa_info;
111
112         /* move sp_objs contents as well, as their indices match fp ones */
113         memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
114
115         /* move fp_stats contents as well, as their indices match fp ones */
116         memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
117
118         /* Update txdata pointers in fp and move txdata content accordingly:
119          * Each fp consumes 'max_cos' txdata structures, so the index should be
120          * decremented by max_cos x delta.
121          */
122
123         old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
124         new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
125                                 (bp)->max_cos;
126         if (from == FCOE_IDX(bp)) {
127                 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
128                 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
129         }
130
131         memcpy(&bp->bnx2x_txq[new_txdata_index],
132                &bp->bnx2x_txq[old_txdata_index],
133                sizeof(struct bnx2x_fp_txdata));
134         to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
135 }
136
137 /**
138  * bnx2x_fill_fw_str - Fill buffer with FW version string.
139  *
140  * @bp:        driver handle
141  * @buf:       character buffer to fill with the fw name
142  * @buf_len:   length of the above buffer
143  *
144  */
145 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
146 {
147         if (IS_PF(bp)) {
148                 u8 phy_fw_ver[PHY_FW_VER_LEN];
149
150                 phy_fw_ver[0] = '\0';
151                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
152                                              phy_fw_ver, PHY_FW_VER_LEN);
153                 strlcpy(buf, bp->fw_ver, buf_len);
154                 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
155                          "bc %d.%d.%d%s%s",
156                          (bp->common.bc_ver & 0xff0000) >> 16,
157                          (bp->common.bc_ver & 0xff00) >> 8,
158                          (bp->common.bc_ver & 0xff),
159                          ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
160         } else {
161                 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
162         }
163 }
164
165 /**
166  * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
167  *
168  * @bp: driver handle
169  * @delta:      number of eth queues which were not allocated
170  */
171 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
172 {
173         int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
174
175         /* Queue pointers cannot be re-set on a per-fp basis, as moving a pointer
176          * backward along the array could cause memory to be overwritten
177          */
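        /* bp->bnx2x_txq is laid out as max_cos consecutive blocks of
         * BNX2X_NUM_ETH_QUEUES entries each.  With fewer ETH queues the
         * blocks for cos > 0 now start earlier, so copy each remaining
         * entry down to its new slot and repoint the fastpath at it.
         */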
178         for (cos = 1; cos < bp->max_cos; cos++) {
179                 for (i = 0; i < old_eth_num - delta; i++) {
180                         struct bnx2x_fastpath *fp = &bp->fp[i];
181                         int new_idx = cos * (old_eth_num - delta) + i;
182
183                         memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
184                                sizeof(struct bnx2x_fp_txdata));
185                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
186                 }
187         }
188 }
189
190 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
191
192 /* free skb in the packet ring at pos idx
193  * return idx of last bd freed
194  */
195 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
196                              u16 idx, unsigned int *pkts_compl,
197                              unsigned int *bytes_compl)
198 {
199         struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
200         struct eth_tx_start_bd *tx_start_bd;
201         struct eth_tx_bd *tx_data_bd;
202         struct sk_buff *skb = tx_buf->skb;
203         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
204         int nbd;
205         u16 split_bd_len = 0;
206
207         /* prefetch skb end pointer to speed up dev_kfree_skb() */
208         prefetch(&skb->end);
209
210         DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
211            txdata->txq_index, idx, tx_buf, skb);
212
213         tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
214
215         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
216 #ifdef BNX2X_STOP_ON_ERROR
217         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
218                 BNX2X_ERR("BAD nbd!\n");
219                 bnx2x_panic();
220         }
221 #endif
222         new_cons = nbd + tx_buf->first_bd;
223
224         /* Get the next bd */
225         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
226
227         /* Skip a parse bd... */
228         --nbd;
229         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
230
231         if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
232                 /* Skip second parse bd... */
233                 --nbd;
234                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
235         }
236
237         /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
238         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
239                 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
240                 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
241                 --nbd;
242                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
243         }
244
245         /* unmap first bd */
246         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
247                          BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
248                          DMA_TO_DEVICE);
249
250         /* now free frags */
251         while (nbd > 0) {
252
253                 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
254                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
255                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
256                 if (--nbd)
257                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
258         }
259
260         /* release skb */
261         WARN_ON(!skb);
262         if (likely(skb)) {
263                 (*pkts_compl)++;
264                 (*bytes_compl) += skb->len;
265                 dev_kfree_skb_any(skb);
266         }
267
268         tx_buf->first_bd = 0;
269         tx_buf->skb = NULL;
270
271         return new_cons;
272 }
273
274 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
275 {
276         struct netdev_queue *txq;
277         u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
278         unsigned int pkts_compl = 0, bytes_compl = 0;
279
280 #ifdef BNX2X_STOP_ON_ERROR
281         if (unlikely(bp->panic))
282                 return -1;
283 #endif
284
285         txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
286         hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
287         sw_cons = txdata->tx_pkt_cons;
288
289         /* Ensure subsequent loads occur after hw_cons */
290         smp_rmb();
291
292         while (sw_cons != hw_cons) {
293                 u16 pkt_cons;
294
295                 pkt_cons = TX_BD(sw_cons);
296
297                 DP(NETIF_MSG_TX_DONE,
298                    "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
299                    txdata->txq_index, hw_cons, sw_cons, pkt_cons);
300
301                 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
302                                             &pkts_compl, &bytes_compl);
303
304                 sw_cons++;
305         }
306
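        /* Report the completed packet and byte counts to the stack's
         * byte queue limit (BQL) accounting for this tx queue.
         */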
307         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
308
309         txdata->tx_pkt_cons = sw_cons;
310         txdata->tx_bd_cons = bd_cons;
311
312         /* Need to make the tx_bd_cons update visible to start_xmit()
313          * before checking for netif_tx_queue_stopped().  Without the
314          * memory barrier, there is a small possibility that
315          * start_xmit() will miss it and cause the queue to be stopped
316          * forever.
317          * On the other hand we need an rmb() here to ensure the proper
318          * ordering of bit testing in the following
319          * netif_tx_queue_stopped(txq) call.
320          */
321         smp_mb();
322
323         if (unlikely(netif_tx_queue_stopped(txq))) {
324                 /* Taking tx_lock() is needed to prevent re-enabling the queue
325                  * while it's empty. This could happen if rx_action() gets
326                  * suspended in bnx2x_tx_int() after the condition before
327                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
328                  *
329                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
330                  * sends some packets consuming the whole queue again->
331                  * stops the queue
332                  */
333
334                 __netif_tx_lock(txq, smp_processor_id());
335
336                 if ((netif_tx_queue_stopped(txq)) &&
337                     (bp->state == BNX2X_STATE_OPEN) &&
338                     (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
339                         netif_tx_wake_queue(txq);
340
341                 __netif_tx_unlock(txq);
342         }
343         return 0;
344 }
345
346 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
347                                              u16 idx)
348 {
349         u16 last_max = fp->last_max_sge;
350
351         if (SUB_S16(idx, last_max) > 0)
352                 fp->last_max_sge = idx;
353 }
354
355 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
356                                          u16 sge_len,
357                                          struct eth_end_agg_rx_cqe *cqe)
358 {
359         struct bnx2x *bp = fp->bp;
360         u16 last_max, last_elem, first_elem;
361         u16 delta = 0;
362         u16 i;
363
364         if (!sge_len)
365                 return;
366
367         /* First mark all used pages */
368         for (i = 0; i < sge_len; i++)
369                 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
370                         RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
371
372         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
373            sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
374
375         /* Here we assume that the last SGE index is the biggest */
376         prefetch((void *)(fp->sge_mask));
377         bnx2x_update_last_max_sge(fp,
378                 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
379
380         last_max = RX_SGE(fp->last_max_sge);
381         last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
382         first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
383
384         /* If ring is not full */
385         if (last_elem + 1 != first_elem)
386                 last_elem++;
387
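        /* fp->sge_mask keeps one bit per SGE entry, grouped into 64-bit
         * elements; a cleared bit marks a page consumed by the aggregation
         * above.  The producer advances only over whole 64-entry elements:
         * once an element drops to zero it is re-armed to all ones and
         * rx_sge_prod moves past it.
         */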
388         /* Now update the prod */
389         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
390                 if (likely(fp->sge_mask[i]))
391                         break;
392
393                 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
394                 delta += BIT_VEC64_ELEM_SZ;
395         }
396
397         if (delta > 0) {
398                 fp->rx_sge_prod += delta;
399                 /* clear page-end entries */
400                 bnx2x_clear_sge_mask_next_elems(fp);
401         }
402
403         DP(NETIF_MSG_RX_STATUS,
404            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
405            fp->last_max_sge, fp->rx_sge_prod);
406 }
407
408 /* Get Toeplitz hash value in the skb using the value from the
409  * CQE (calculated by HW).
410  */
411 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
412                             const struct eth_fast_path_rx_cqe *cqe,
413                             enum pkt_hash_types *rxhash_type)
414 {
415         /* Get Toeplitz hash from CQE */
416         if ((bp->dev->features & NETIF_F_RXHASH) &&
417             (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
418                 enum eth_rss_hash_type htype;
419
420                 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
421                 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
422                                 (htype == TCP_IPV6_HASH_TYPE)) ?
423                                PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
424
425                 return le32_to_cpu(cqe->rss_hash_result);
426         }
427         *rxhash_type = PKT_HASH_TYPE_NONE;
428         return 0;
429 }
430
431 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
432                             u16 cons, u16 prod,
433                             struct eth_fast_path_rx_cqe *cqe)
434 {
435         struct bnx2x *bp = fp->bp;
436         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
437         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
438         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
439         dma_addr_t mapping;
440         struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
441         struct sw_rx_bd *first_buf = &tpa_info->first_buf;
442
443         /* print error if current state != stop */
444         if (tpa_info->tpa_state != BNX2X_TPA_STOP)
445                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
446
447         /* Try to map an empty data buffer from the aggregation info  */
448         mapping = dma_map_single(&bp->pdev->dev,
449                                  first_buf->data + NET_SKB_PAD,
450                                  fp->rx_buf_size, DMA_FROM_DEVICE);
451         /*
452          *  ...if it fails - move the skb from the consumer to the producer
453          *  and set the current aggregation state as ERROR to drop it
454          *  when TPA_STOP arrives.
455          */
456
457         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
458                 /* Move the BD from the consumer to the producer */
459                 bnx2x_reuse_rx_data(fp, cons, prod);
460                 tpa_info->tpa_state = BNX2X_TPA_ERROR;
461                 return;
462         }
463
464         /* move empty data from pool to prod */
465         prod_rx_buf->data = first_buf->data;
466         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
467         /* point prod_bd to new data */
468         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
469         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
470
471         /* move partial skb from cons to pool (don't unmap yet) */
472         *first_buf = *cons_rx_buf;
473
474         /* mark bin state as START */
475         tpa_info->parsing_flags =
476                 le16_to_cpu(cqe->pars_flags.flags);
477         tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
478         tpa_info->tpa_state = BNX2X_TPA_START;
479         tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
480         tpa_info->placement_offset = cqe->placement_offset;
481         tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
482         if (fp->mode == TPA_MODE_GRO) {
483                 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
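                /* Round SGE_PAGES down to a whole multiple of the segment
                 * size; bnx2x_fill_frag_skb() later slices each SGE chunk
                 * into gro_size-sized frags, so full_page must not end with
                 * a partial segment.
                 */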
484                 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
485                 tpa_info->gro_size = gro_size;
486         }
487
488 #ifdef BNX2X_STOP_ON_ERROR
489         fp->tpa_queue_used |= (1 << queue);
490         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
491            fp->tpa_queue_used);
492 #endif
493 }
494
495 /* Timestamp option length allowed for TPA aggregation:
496  *
497  *              nop nop kind length echo val
498  */
499 #define TPA_TSTAMP_OPT_LEN      12
500 /**
501  * bnx2x_set_gro_params - compute GRO values
502  *
503  * @skb:                packet skb
504  * @parsing_flags:      parsing flags from the START CQE
505  * @len_on_bd:          total length of the first packet for the
506  *                      aggregation.
507  * @pkt_len:            length of all segments
508  *
509  * Approximates the MSS for this aggregation using its first
510  * packet, and computes the number of aggregated segments and
511  * the gso_type.
512  */
513 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
514                                  u16 len_on_bd, unsigned int pkt_len,
515                                  u16 num_of_coalesced_segs)
516 {
517         /* TPA aggregation won't have either IP options or TCP options
518          * other than timestamp or IPv6 extension headers.
519          */
520         u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
521
522         if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
523             PRS_FLAG_OVERETH_IPV6) {
524                 hdrs_len += sizeof(struct ipv6hdr);
525                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
526         } else {
527                 hdrs_len += sizeof(struct iphdr);
528                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
529         }
530
531         /* Check if there was a TCP timestamp; if there was, it will
532          * always be 12 bytes long: nop nop kind length echo val.
533          *
534          * Otherwise FW would close the aggregation.
535          */
536         if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
537                 hdrs_len += TPA_TSTAMP_OPT_LEN;
538
539         skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
540
541         /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
542          * to skb_shinfo(skb)->gso_segs
543          */
544         NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
545 }
546
547 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
548                               u16 index, gfp_t gfp_mask)
549 {
550         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
551         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
552         struct bnx2x_alloc_pool *pool = &fp->page_pool;
553         dma_addr_t mapping;
554
555         if (!pool->page) {
556                 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
557                 if (unlikely(!pool->page))
558                         return -ENOMEM;
559
560                 pool->offset = 0;
561         }
562
563         mapping = dma_map_page(&bp->pdev->dev, pool->page,
564                                pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
565         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
566                 BNX2X_ERR("Can't map sge\n");
567                 return -ENOMEM;
568         }
569
570         sw_buf->page = pool->page;
571         sw_buf->offset = pool->offset;
572
573         dma_unmap_addr_set(sw_buf, mapping, mapping);
574
575         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
576         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
577
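        /* Pool pages are handed out in SGE_PAGE_SIZE slices.  If another
         * slice still fits in the page, take an extra reference so the page
         * outlives this slice's eventual put_page(); otherwise forget the
         * pool pointer - the remaining reference now belongs to the ring
         * entry that was just filled.
         */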
578         pool->offset += SGE_PAGE_SIZE;
579         if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
580                 get_page(pool->page);
581         else
582                 pool->page = NULL;
583         return 0;
584 }
585
586 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
587                                struct bnx2x_agg_info *tpa_info,
588                                u16 pages,
589                                struct sk_buff *skb,
590                                struct eth_end_agg_rx_cqe *cqe,
591                                u16 cqe_idx)
592 {
593         struct sw_rx_page *rx_pg, old_rx_pg;
594         u32 i, frag_len, frag_size;
595         int err, j, frag_id = 0;
596         u16 len_on_bd = tpa_info->len_on_bd;
597         u16 full_page = 0, gro_size = 0;
598
599         frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
600
601         if (fp->mode == TPA_MODE_GRO) {
602                 gro_size = tpa_info->gro_size;
603                 full_page = tpa_info->full_page;
604         }
605
606         /* This is needed in order to enable forwarding support */
607         if (frag_size)
608                 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
609                                      le16_to_cpu(cqe->pkt_len),
610                                      le16_to_cpu(cqe->num_of_coalesced_segs));
611
612 #ifdef BNX2X_STOP_ON_ERROR
613         if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
614                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
615                           pages, cqe_idx);
616                 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
617                 bnx2x_panic();
618                 return -EINVAL;
619         }
620 #endif
621
622         /* Run through the SGL and compose the fragmented skb */
623         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
624                 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
625
626                 /* FW gives the indices of the SGE as if the ring were a plain array
627                    (meaning that the "next page" element consumes 2 indices) */
628                 if (fp->mode == TPA_MODE_GRO)
629                         frag_len = min_t(u32, frag_size, (u32)full_page);
630                 else /* LRO */
631                         frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
632
633                 rx_pg = &fp->rx_page_ring[sge_idx];
634                 old_rx_pg = *rx_pg;
635
636                 /* If we fail to allocate a substitute page, we simply stop
637                    where we are and drop the whole packet */
638                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
639                 if (unlikely(err)) {
640                         bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
641                         return err;
642                 }
643
644                 dma_unmap_page(&bp->pdev->dev,
645                                dma_unmap_addr(&old_rx_pg, mapping),
646                                SGE_PAGE_SIZE, DMA_FROM_DEVICE);
647                 /* Add one frag and update the appropriate fields in the skb */
648                 if (fp->mode == TPA_MODE_LRO)
649                         skb_fill_page_desc(skb, j, old_rx_pg.page,
650                                            old_rx_pg.offset, frag_len);
651                 else { /* GRO */
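                        /* Split the SGE page into gro_size (MSS) sized frags;
                         * every frag after the first maps the same page and so
                         * needs its own page reference, hence the get_page()
                         * below.  Keeping frags MSS-aligned presumably lets
                         * the aggregated skb be resegmented along the original
                         * boundaries when forwarded (see bnx2x_set_gro_params()).
                         */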
652                         int rem;
653                         int offset = 0;
654                         for (rem = frag_len; rem > 0; rem -= gro_size) {
655                                 int len = rem > gro_size ? gro_size : rem;
656                                 skb_fill_page_desc(skb, frag_id++,
657                                                    old_rx_pg.page,
658                                                    old_rx_pg.offset + offset,
659                                                    len);
660                                 if (offset)
661                                         get_page(old_rx_pg.page);
662                                 offset += len;
663                         }
664                 }
665
666                 skb->data_len += frag_len;
667                 skb->truesize += SGE_PAGES;
668                 skb->len += frag_len;
669
670                 frag_size -= frag_len;
671         }
672
673         return 0;
674 }
675
676 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
677 {
678         if (fp->rx_frag_size)
679                 skb_free_frag(data);
680         else
681                 kfree(data);
682 }
683
684 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
685 {
686         if (fp->rx_frag_size) {
687                 /* GFP_KERNEL allocations are used only during initialization */
688                 if (unlikely(gfpflags_allow_blocking(gfp_mask)))
689                         return (void *)__get_free_page(gfp_mask);
690
691                 return netdev_alloc_frag(fp->rx_frag_size);
692         }
693
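        /* rx_frag_size is zero when the required buffer is too large to be
         * served from a page fragment; fall back to a plain kmalloc() buffer
         * in that case.
         */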
694         return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
695 }
696
697 #ifdef CONFIG_INET
698 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
699 {
700         const struct iphdr *iph = ip_hdr(skb);
701         struct tcphdr *th;
702
703         skb_set_transport_header(skb, sizeof(struct iphdr));
704         th = tcp_hdr(skb);
705
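        /* tcp_gro_complete() treats the skb as CHECKSUM_PARTIAL, so seed
         * th->check with the (complemented) TCP pseudo-header checksum for
         * the aggregated length before handing the skb over.
         */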
706         th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
707                                   iph->saddr, iph->daddr, 0);
708 }
709
710 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
711 {
712         struct ipv6hdr *iph = ipv6_hdr(skb);
713         struct tcphdr *th;
714
715         skb_set_transport_header(skb, sizeof(struct ipv6hdr));
716         th = tcp_hdr(skb);
717
718         th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
719                                   &iph->saddr, &iph->daddr, 0);
720 }
721
722 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
723                             void (*gro_func)(struct bnx2x*, struct sk_buff*))
724 {
725         skb_reset_network_header(skb);
726         gro_func(bp, skb);
727         tcp_gro_complete(skb);
728 }
729 #endif
730
731 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
732                                struct sk_buff *skb)
733 {
734 #ifdef CONFIG_INET
735         if (skb_shinfo(skb)->gso_size) {
736                 switch (be16_to_cpu(skb->protocol)) {
737                 case ETH_P_IP:
738                         bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
739                         break;
740                 case ETH_P_IPV6:
741                         bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
742                         break;
743                 default:
744                         netdev_WARN_ONCE(bp->dev,
745                                          "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
746                                          be16_to_cpu(skb->protocol));
747                 }
748         }
749 #endif
750         skb_record_rx_queue(skb, fp->rx_queue);
751         napi_gro_receive(&fp->napi, skb);
752 }
753
754 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
755                            struct bnx2x_agg_info *tpa_info,
756                            u16 pages,
757                            struct eth_end_agg_rx_cqe *cqe,
758                            u16 cqe_idx)
759 {
760         struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
761         u8 pad = tpa_info->placement_offset;
762         u16 len = tpa_info->len_on_bd;
763         struct sk_buff *skb = NULL;
764         u8 *new_data, *data = rx_buf->data;
765         u8 old_tpa_state = tpa_info->tpa_state;
766
767         tpa_info->tpa_state = BNX2X_TPA_STOP;
768
769         /* If there was an error during the handling of the TPA_START -
770          * drop this aggregation.
771          */
772         if (old_tpa_state == BNX2X_TPA_ERROR)
773                 goto drop;
774
775         /* Try to allocate the new data */
776         new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
777         /* Unmap skb in the pool anyway, as we are going to change
778            pool entry status to BNX2X_TPA_STOP even if new skb allocation
779            fails. */
780         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
781                          fp->rx_buf_size, DMA_FROM_DEVICE);
782         if (likely(new_data))
783                 skb = build_skb(data, fp->rx_frag_size);
784
785         if (likely(skb)) {
786 #ifdef BNX2X_STOP_ON_ERROR
787                 if (pad + len > fp->rx_buf_size) {
788                         BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
789                                   pad, len, fp->rx_buf_size);
790                         bnx2x_panic();
791                         return;
792                 }
793 #endif
794
795                 skb_reserve(skb, pad + NET_SKB_PAD);
796                 skb_put(skb, len);
797                 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
798
799                 skb->protocol = eth_type_trans(skb, bp->dev);
800                 skb->ip_summed = CHECKSUM_UNNECESSARY;
801
802                 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
803                                          skb, cqe, cqe_idx)) {
804                         if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
805                                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
806                         bnx2x_gro_receive(bp, fp, skb);
807                 } else {
808                         DP(NETIF_MSG_RX_STATUS,
809                            "Failed to allocate new pages - dropping packet!\n");
810                         dev_kfree_skb_any(skb);
811                 }
812
813                 /* put new data in bin */
814                 rx_buf->data = new_data;
815
816                 return;
817         }
818         if (new_data)
819                 bnx2x_frag_free(fp, new_data);
820 drop:
821         /* drop the packet and keep the buffer in the bin */
822         DP(NETIF_MSG_RX_STATUS,
823            "Failed to allocate or map a new skb - dropping packet!\n");
824         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
825 }
826
827 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
828                                u16 index, gfp_t gfp_mask)
829 {
830         u8 *data;
831         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
832         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
833         dma_addr_t mapping;
834
835         data = bnx2x_frag_alloc(fp, gfp_mask);
836         if (unlikely(data == NULL))
837                 return -ENOMEM;
838
839         mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
840                                  fp->rx_buf_size,
841                                  DMA_FROM_DEVICE);
842         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
843                 bnx2x_frag_free(fp, data);
844                 BNX2X_ERR("Can't map rx data\n");
845                 return -ENOMEM;
846         }
847
848         rx_buf->data = data;
849         dma_unmap_addr_set(rx_buf, mapping, mapping);
850
851         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
852         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
853
854         return 0;
855 }
856
857 static
858 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
859                                  struct bnx2x_fastpath *fp,
860                                  struct bnx2x_eth_q_stats *qstats)
861 {
862         /* Do nothing if no L4 csum validation was done.
863          * We do not check whether IP csum was validated. For IPv4 we assume
864          * that if the card got as far as validating the L4 csum, it also
865          * validated the IP csum. IPv6 has no IP csum.
866          */
867         if (cqe->fast_path_cqe.status_flags &
868             ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
869                 return;
870
871         /* If L4 validation was done, check if an error was found. */
872
873         if (cqe->fast_path_cqe.type_error_flags &
874             (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
875              ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
876                 qstats->hw_csum_err++;
877         else
878                 skb->ip_summed = CHECKSUM_UNNECESSARY;
879 }
880
881 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
882 {
883         struct bnx2x *bp = fp->bp;
884         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
885         u16 sw_comp_cons, sw_comp_prod;
886         int rx_pkt = 0;
887         union eth_rx_cqe *cqe;
888         struct eth_fast_path_rx_cqe *cqe_fp;
889
890 #ifdef BNX2X_STOP_ON_ERROR
891         if (unlikely(bp->panic))
892                 return 0;
893 #endif
894         if (budget <= 0)
895                 return rx_pkt;
896
897         bd_cons = fp->rx_bd_cons;
898         bd_prod = fp->rx_bd_prod;
899         bd_prod_fw = bd_prod;
900         sw_comp_cons = fp->rx_comp_cons;
901         sw_comp_prod = fp->rx_comp_prod;
902
903         comp_ring_cons = RCQ_BD(sw_comp_cons);
904         cqe = &fp->rx_comp_ring[comp_ring_cons];
905         cqe_fp = &cqe->fast_path_cqe;
906
907         DP(NETIF_MSG_RX_STATUS,
908            "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
909
910         while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
911                 struct sw_rx_bd *rx_buf = NULL;
912                 struct sk_buff *skb;
913                 u8 cqe_fp_flags;
914                 enum eth_rx_cqe_type cqe_fp_type;
915                 u16 len, pad, queue;
916                 u8 *data;
917                 u32 rxhash;
918                 enum pkt_hash_types rxhash_type;
919
920 #ifdef BNX2X_STOP_ON_ERROR
921                 if (unlikely(bp->panic))
922                         return 0;
923 #endif
924
925                 bd_prod = RX_BD(bd_prod);
926                 bd_cons = RX_BD(bd_cons);
927
928                 /* A rmb() is required to ensure that the CQE is not read
929                  * before it is written by the adapter DMA.  PCI ordering
930                  * rules will make sure the other fields are written before
931                  * the marker at the end of struct eth_fast_path_rx_cqe
932                  * but without rmb() a weakly ordered processor can process
933                  * stale data.  Without the barrier the TPA state machine might
934                  * enter an inconsistent state and the kernel stack might be
935                  * provided with an incorrect packet description - these lead
936                  * to various kernel crashes.
937                  */
938                 rmb();
939
940                 cqe_fp_flags = cqe_fp->type_error_flags;
941                 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
942
943                 DP(NETIF_MSG_RX_STATUS,
944                    "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
945                    CQE_TYPE(cqe_fp_flags),
946                    cqe_fp_flags, cqe_fp->status_flags,
947                    le32_to_cpu(cqe_fp->rss_hash_result),
948                    le16_to_cpu(cqe_fp->vlan_tag),
949                    le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
950
951                 /* is this a slowpath msg? */
952                 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
953                         bnx2x_sp_event(fp, cqe);
954                         goto next_cqe;
955                 }
956
957                 rx_buf = &fp->rx_buf_ring[bd_cons];
958                 data = rx_buf->data;
959
960                 if (!CQE_TYPE_FAST(cqe_fp_type)) {
961                         struct bnx2x_agg_info *tpa_info;
962                         u16 frag_size, pages;
963 #ifdef BNX2X_STOP_ON_ERROR
964                         /* sanity check */
965                         if (fp->mode == TPA_MODE_DISABLED &&
966                             (CQE_TYPE_START(cqe_fp_type) ||
967                              CQE_TYPE_STOP(cqe_fp_type)))
968                                 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
969                                           CQE_TYPE(cqe_fp_type));
970 #endif
971
972                         if (CQE_TYPE_START(cqe_fp_type)) {
973                                 u16 queue = cqe_fp->queue_index;
974                                 DP(NETIF_MSG_RX_STATUS,
975                                    "calling tpa_start on queue %d\n",
976                                    queue);
977
978                                 bnx2x_tpa_start(fp, queue,
979                                                 bd_cons, bd_prod,
980                                                 cqe_fp);
981
982                                 goto next_rx;
983                         }
984                         queue = cqe->end_agg_cqe.queue_index;
985                         tpa_info = &fp->tpa_info[queue];
986                         DP(NETIF_MSG_RX_STATUS,
987                            "calling tpa_stop on queue %d\n",
988                            queue);
989
990                         frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
991                                     tpa_info->len_on_bd;
992
993                         if (fp->mode == TPA_MODE_GRO)
994                                 pages = (frag_size + tpa_info->full_page - 1) /
995                                          tpa_info->full_page;
996                         else
997                                 pages = SGE_PAGE_ALIGN(frag_size) >>
998                                         SGE_PAGE_SHIFT;
999
1000                         bnx2x_tpa_stop(bp, fp, tpa_info, pages,
1001                                        &cqe->end_agg_cqe, comp_ring_cons);
1002 #ifdef BNX2X_STOP_ON_ERROR
1003                         if (bp->panic)
1004                                 return 0;
1005 #endif
1006
1007                         bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1008                         goto next_cqe;
1009                 }
1010                 /* non TPA */
1011                 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1012                 pad = cqe_fp->placement_offset;
1013                 dma_sync_single_for_cpu(&bp->pdev->dev,
1014                                         dma_unmap_addr(rx_buf, mapping),
1015                                         pad + RX_COPY_THRESH,
1016                                         DMA_FROM_DEVICE);
1017                 pad += NET_SKB_PAD;
1018                 prefetch(data + pad); /* speedup eth_type_trans() */
1019                 /* is this an error packet? */
1020                 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1021                         DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1022                            "ERROR  flags %x  rx packet %u\n",
1023                            cqe_fp_flags, sw_comp_cons);
1024                         bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1025                         goto reuse_rx;
1026                 }
1027
1028                 /* Since we don't have a jumbo ring,
1029                  * copy small packets if mtu > 1500
1030                  */
1031                 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1032                     (len <= RX_COPY_THRESH)) {
1033                         skb = napi_alloc_skb(&fp->napi, len);
1034                         if (skb == NULL) {
1035                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1036                                    "ERROR  packet dropped because of alloc failure\n");
1037                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1038                                 goto reuse_rx;
1039                         }
1040                         memcpy(skb->data, data + pad, len);
1041                         bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1042                 } else {
1043                         if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1044                                                        GFP_ATOMIC) == 0)) {
1045                                 dma_unmap_single(&bp->pdev->dev,
1046                                                  dma_unmap_addr(rx_buf, mapping),
1047                                                  fp->rx_buf_size,
1048                                                  DMA_FROM_DEVICE);
1049                                 skb = build_skb(data, fp->rx_frag_size);
1050                                 if (unlikely(!skb)) {
1051                                         bnx2x_frag_free(fp, data);
1052                                         bnx2x_fp_qstats(bp, fp)->
1053                                                         rx_skb_alloc_failed++;
1054                                         goto next_rx;
1055                                 }
1056                                 skb_reserve(skb, pad);
1057                         } else {
1058                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1059                                    "ERROR  packet dropped because of alloc failure\n");
1060                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1061 reuse_rx:
1062                                 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1063                                 goto next_rx;
1064                         }
1065                 }
1066
1067                 skb_put(skb, len);
1068                 skb->protocol = eth_type_trans(skb, bp->dev);
1069
1070                 /* Set Toeplitz hash for a non-LRO skb */
1071                 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1072                 skb_set_hash(skb, rxhash, rxhash_type);
1073
1074                 skb_checksum_none_assert(skb);
1075
1076                 if (bp->dev->features & NETIF_F_RXCSUM)
1077                         bnx2x_csum_validate(skb, cqe, fp,
1078                                             bnx2x_fp_qstats(bp, fp));
1079
1080                 skb_record_rx_queue(skb, fp->rx_queue);
1081
1082                 /* Check if this packet was timestamped */
1083                 if (unlikely(cqe->fast_path_cqe.type_error_flags &
1084                              (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1085                         bnx2x_set_rx_ts(bp, skb);
1086
1087                 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1088                     PARSING_FLAGS_VLAN)
1089                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1090                                                le16_to_cpu(cqe_fp->vlan_tag));
1091
1092                 napi_gro_receive(&fp->napi, skb);
1093 next_rx:
1094                 rx_buf->data = NULL;
1095
1096                 bd_cons = NEXT_RX_IDX(bd_cons);
1097                 bd_prod = NEXT_RX_IDX(bd_prod);
1098                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1099                 rx_pkt++;
1100 next_cqe:
1101                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1102                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1103
1104                 /* mark CQE as free */
1105                 BNX2X_SEED_CQE(cqe_fp);
1106
1107                 if (rx_pkt == budget)
1108                         break;
1109
1110                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1111                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1112                 cqe_fp = &cqe->fast_path_cqe;
1113         } /* while */
1114
1115         fp->rx_bd_cons = bd_cons;
1116         fp->rx_bd_prod = bd_prod_fw;
1117         fp->rx_comp_cons = sw_comp_cons;
1118         fp->rx_comp_prod = sw_comp_prod;
1119
1120         /* Update producers */
1121         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1122                              fp->rx_sge_prod);
1123
1124         return rx_pkt;
1125 }
1126
1127 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1128 {
1129         struct bnx2x_fastpath *fp = fp_cookie;
1130         struct bnx2x *bp = fp->bp;
1131         u8 cos;
1132
1133         DP(NETIF_MSG_INTR,
1134            "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1135            fp->index, fp->fw_sb_id, fp->igu_sb_id);
1136
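        /* Mask further interrupts from this status block (IGU_INT_DISABLE);
         * bnx2x_poll() re-enables them once the NAPI poll round completes.
         */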
1137         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1138
1139 #ifdef BNX2X_STOP_ON_ERROR
1140         if (unlikely(bp->panic))
1141                 return IRQ_HANDLED;
1142 #endif
1143
1144         /* Handle Rx and Tx according to MSI-X vector */
1145         for_each_cos_in_tx_queue(fp, cos)
1146                 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1147
1148         prefetch(&fp->sb_running_index[SM_RX_ID]);
1149         napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1150
1151         return IRQ_HANDLED;
1152 }
1153
1154 /* HW Lock for shared dual port PHYs */
1155 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1156 {
1157         mutex_lock(&bp->port.phy_mutex);
1158
1159         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1160 }
1161
1162 void bnx2x_release_phy_lock(struct bnx2x *bp)
1163 {
1164         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1165
1166         mutex_unlock(&bp->port.phy_mutex);
1167 }
1168
1169 /* calculates MF speed according to current linespeed and MF configuration */
1170 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1171 {
1172         u16 line_speed = bp->link_vars.line_speed;
1173         if (IS_MF(bp)) {
1174                 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1175                                                    bp->mf_config[BP_VN(bp)]);
1176
1177                 /* Calculate the current MAX line speed limit for the MF
1178                  * devices
1179                  */
1180                 if (IS_MF_PERCENT_BW(bp))
1181                         line_speed = (line_speed * maxCfg) / 100;
1182                 else { /* SD mode */
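                        /* In switch-dependent MF mode maxCfg is expressed in
                         * units of 100 Mbps, so scale it before comparing it
                         * with the physical line speed.
                         */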
1183                         u16 vn_max_rate = maxCfg * 100;
1184
1185                         if (vn_max_rate < line_speed)
1186                                 line_speed = vn_max_rate;
1187                 }
1188         }
1189
1190         return line_speed;
1191 }
1192
1193 /**
1194  * bnx2x_fill_report_data - fill link report data to report
1195  *
1196  * @bp:         driver handle
1197  * @data:       link state to update
1198  *
1199  * It uses non-atomic bit operations because it is called under the mutex.
1200  */
1201 static void bnx2x_fill_report_data(struct bnx2x *bp,
1202                                    struct bnx2x_link_report_data *data)
1203 {
1204         memset(data, 0, sizeof(*data));
1205
1206         if (IS_PF(bp)) {
1207                 /* Fill the report data: effective line speed */
1208                 data->line_speed = bnx2x_get_mf_speed(bp);
1209
1210                 /* Link is down */
1211                 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1212                         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1213                                   &data->link_report_flags);
1214
1215                 if (!BNX2X_NUM_ETH_QUEUES(bp))
1216                         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1217                                   &data->link_report_flags);
1218
1219                 /* Full DUPLEX */
1220                 if (bp->link_vars.duplex == DUPLEX_FULL)
1221                         __set_bit(BNX2X_LINK_REPORT_FD,
1222                                   &data->link_report_flags);
1223
1224                 /* Rx Flow Control is ON */
1225                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1226                         __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1227                                   &data->link_report_flags);
1228
1229                 /* Tx Flow Control is ON */
1230                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1231                         __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1232                                   &data->link_report_flags);
1233         } else { /* VF */
1234                 *data = bp->vf_link_vars;
1235         }
1236 }
1237
1238 /**
1239  * bnx2x_link_report - report link status to OS.
1240  *
1241  * @bp:         driver handle
1242  *
1243  * Calls __bnx2x_link_report() under the same locking scheme
1244  * as the link/PHY state management code to ensure consistent
1245  * link reporting.
1246  */
1247
1248 void bnx2x_link_report(struct bnx2x *bp)
1249 {
1250         bnx2x_acquire_phy_lock(bp);
1251         __bnx2x_link_report(bp);
1252         bnx2x_release_phy_lock(bp);
1253 }
1254
1255 /**
1256  * __bnx2x_link_report - report link status to OS.
1257  *
1258  * @bp:         driver handle
1259  *
1260  * Non-atomic implementation.
1261  * Should be called under the phy_lock.
1262  */
1263 void __bnx2x_link_report(struct bnx2x *bp)
1264 {
1265         struct bnx2x_link_report_data cur_data;
1266
1267         if (bp->force_link_down) {
1268                 bp->link_vars.link_up = 0;
1269                 return;
1270         }
1271
1272         /* reread mf_cfg */
1273         if (IS_PF(bp) && !CHIP_IS_E1(bp))
1274                 bnx2x_read_mf_cfg(bp);
1275
1276         /* Read the current link report info */
1277         bnx2x_fill_report_data(bp, &cur_data);
1278
1279         /* Don't report link down or exactly the same link status twice */
1280         if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1281             (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1282                       &bp->last_reported_link.link_report_flags) &&
1283              test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1284                       &cur_data.link_report_flags)))
1285                 return;
1286
1287         bp->link_cnt++;
1288
1289         /* We are going to report new link parameters now -
1290          * remember the current data for next time.
1291          */
1292         memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1293
1294         /* propagate status to VFs */
1295         if (IS_PF(bp))
1296                 bnx2x_iov_link_update(bp);
1297
1298         if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1299                      &cur_data.link_report_flags)) {
1300                 netif_carrier_off(bp->dev);
1301                 netdev_err(bp->dev, "NIC Link is Down\n");
1302                 return;
1303         } else {
1304                 const char *duplex;
1305                 const char *flow;
1306
1307                 netif_carrier_on(bp->dev);
1308
1309                 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1310                                        &cur_data.link_report_flags))
1311                         duplex = "full";
1312                 else
1313                         duplex = "half";
1314
1315                 /* Handle the FC at the end so that only these flags can
1316                  * possibly still be set. This way we can easily check
1317                  * whether any FC is enabled at all.
1318                  */
1319                 if (cur_data.link_report_flags) {
1320                         if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1321                                      &cur_data.link_report_flags)) {
1322                                 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1323                                      &cur_data.link_report_flags))
1324                                         flow = "ON - receive & transmit";
1325                                 else
1326                                         flow = "ON - receive";
1327                         } else {
1328                                 flow = "ON - transmit";
1329                         }
1330                 } else {
1331                         flow = "none";
1332                 }
1333                 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1334                             cur_data.line_speed, duplex, flow);
1335         }
1336 }
1337
1338 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1339 {
1340         int i;
1341
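        /* Chain the SGE ring pages together: the next-page pointer occupies
         * the last two entry slots of each page (its address is written into
         * the first of the two), and the final page wraps back to the first.
         */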
1342         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1343                 struct eth_rx_sge *sge;
1344
1345                 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1346                 sge->addr_hi =
1347                         cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1348                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1349
1350                 sge->addr_lo =
1351                         cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1352                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1353         }
1354 }
1355
1356 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1357                                 struct bnx2x_fastpath *fp, int last)
1358 {
1359         int i;
1360
1361         for (i = 0; i < last; i++) {
1362                 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1363                 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1364                 u8 *data = first_buf->data;
1365
1366                 if (data == NULL) {
1367                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1368                         continue;
1369                 }
1370                 if (tpa_info->tpa_state == BNX2X_TPA_START)
1371                         dma_unmap_single(&bp->pdev->dev,
1372                                          dma_unmap_addr(first_buf, mapping),
1373                                          fp->rx_buf_size, DMA_FROM_DEVICE);
1374                 bnx2x_frag_free(fp, data);
1375                 first_buf->data = NULL;
1376         }
1377 }
1378
1379 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1380 {
1381         int j;
1382
1383         for_each_rx_queue_cnic(bp, j) {
1384                 struct bnx2x_fastpath *fp = &bp->fp[j];
1385
1386                 fp->rx_bd_cons = 0;
1387
1388                 /* Activate BD ring */
1389                 /* Warning!
1390                  * This will generate an interrupt (to the TSTORM);
1391                  * it must only be done after the chip is initialized.
1392                  */
1393                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1394                                      fp->rx_sge_prod);
1395         }
1396 }
1397
1398 void bnx2x_init_rx_rings(struct bnx2x *bp)
1399 {
1400         int func = BP_FUNC(bp);
1401         u16 ring_prod;
1402         int i, j;
1403
1404         /* Allocate TPA resources */
1405         for_each_eth_queue(bp, j) {
1406                 struct bnx2x_fastpath *fp = &bp->fp[j];
1407
1408                 DP(NETIF_MSG_IFUP,
1409                    "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1410
1411                 if (fp->mode != TPA_MODE_DISABLED) {
1412                         /* Fill the per-aggregation pool */
1413                         for (i = 0; i < MAX_AGG_QS(bp); i++) {
1414                                 struct bnx2x_agg_info *tpa_info =
1415                                         &fp->tpa_info[i];
1416                                 struct sw_rx_bd *first_buf =
1417                                         &tpa_info->first_buf;
1418
1419                                 first_buf->data =
1420                                         bnx2x_frag_alloc(fp, GFP_KERNEL);
1421                                 if (!first_buf->data) {
1422                                         BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1423                                                   j);
1424                                         bnx2x_free_tpa_pool(bp, fp, i);
1425                                         fp->mode = TPA_MODE_DISABLED;
1426                                         break;
1427                                 }
1428                                 dma_unmap_addr_set(first_buf, mapping, 0);
1429                                 tpa_info->tpa_state = BNX2X_TPA_STOP;
1430                         }
1431
1432                         /* "next page" elements initialization */
1433                         bnx2x_set_next_page_sgl(fp);
1434
1435                         /* set SGEs bit mask */
1436                         bnx2x_init_sge_ring_bit_mask(fp);
1437
1438                         /* Allocate SGEs and initialize the ring elements */
1439                         for (i = 0, ring_prod = 0;
1440                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1441
1442                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1443                                                        GFP_KERNEL) < 0) {
1444                                         BNX2X_ERR("was only able to allocate %d rx sges\n",
1445                                                   i);
1446                                         BNX2X_ERR("disabling TPA for queue[%d]\n",
1447                                                   j);
1448                                         /* Cleanup already allocated elements */
1449                                         bnx2x_free_rx_sge_range(bp, fp,
1450                                                                 ring_prod);
1451                                         bnx2x_free_tpa_pool(bp, fp,
1452                                                             MAX_AGG_QS(bp));
1453                                         fp->mode = TPA_MODE_DISABLED;
1454                                         ring_prod = 0;
1455                                         break;
1456                                 }
1457                                 ring_prod = NEXT_SGE_IDX(ring_prod);
1458                         }
1459
1460                         fp->rx_sge_prod = ring_prod;
1461                 }
1462         }
1463
1464         for_each_eth_queue(bp, j) {
1465                 struct bnx2x_fastpath *fp = &bp->fp[j];
1466
1467                 fp->rx_bd_cons = 0;
1468
1469                 /* Activate BD ring */
1470                 /* Warning!
1471                  * This will generate an interrupt (to the TSTORM);
1472                  * it must only be done after the chip is initialized.
1473                  */
1474                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1475                                      fp->rx_sge_prod);
1476
1477                 if (j != 0)
1478                         continue;
1479
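                /* On E1 only, the leading queue's completion ring address is
                 * also published to the USTORM memory workaround location
                 * below.
                 */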
1480                 if (CHIP_IS_E1(bp)) {
1481                         REG_WR(bp, BAR_USTRORM_INTMEM +
1482                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1483                                U64_LO(fp->rx_comp_mapping));
1484                         REG_WR(bp, BAR_USTRORM_INTMEM +
1485                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1486                                U64_HI(fp->rx_comp_mapping));
1487                 }
1488         }
1489 }
1490
1491 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1492 {
1493         u8 cos;
1494         struct bnx2x *bp = fp->bp;
1495
1496         for_each_cos_in_tx_queue(fp, cos) {
1497                 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1498                 unsigned pkts_compl = 0, bytes_compl = 0;
1499
1500                 u16 sw_prod = txdata->tx_pkt_prod;
1501                 u16 sw_cons = txdata->tx_pkt_cons;
1502
1503                 while (sw_cons != sw_prod) {
1504                         bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1505                                           &pkts_compl, &bytes_compl);
1506                         sw_cons++;
1507                 }
1508
1509                 netdev_tx_reset_queue(
1510                         netdev_get_tx_queue(bp->dev,
1511                                             txdata->txq_index));
1512         }
1513 }
1514
1515 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1516 {
1517         int i;
1518
1519         for_each_tx_queue_cnic(bp, i) {
1520                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1521         }
1522 }
1523
1524 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1525 {
1526         int i;
1527
1528         for_each_eth_queue(bp, i) {
1529                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1530         }
1531 }
1532
1533 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1534 {
1535         struct bnx2x *bp = fp->bp;
1536         int i;
1537
1538         /* ring wasn't allocated */
1539         if (fp->rx_buf_ring == NULL)
1540                 return;
1541
1542         for (i = 0; i < NUM_RX_BD; i++) {
1543                 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1544                 u8 *data = rx_buf->data;
1545
1546                 if (data == NULL)
1547                         continue;
1548                 dma_unmap_single(&bp->pdev->dev,
1549                                  dma_unmap_addr(rx_buf, mapping),
1550                                  fp->rx_buf_size, DMA_FROM_DEVICE);
1551
1552                 rx_buf->data = NULL;
1553                 bnx2x_frag_free(fp, data);
1554         }
1555 }
1556
1557 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1558 {
1559         int j;
1560
1561         for_each_rx_queue_cnic(bp, j) {
1562                 bnx2x_free_rx_bds(&bp->fp[j]);
1563         }
1564 }
1565
1566 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1567 {
1568         int j;
1569
1570         for_each_eth_queue(bp, j) {
1571                 struct bnx2x_fastpath *fp = &bp->fp[j];
1572
1573                 bnx2x_free_rx_bds(fp);
1574
1575                 if (fp->mode != TPA_MODE_DISABLED)
1576                         bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1577         }
1578 }
1579
1580 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1581 {
1582         bnx2x_free_tx_skbs_cnic(bp);
1583         bnx2x_free_rx_skbs_cnic(bp);
1584 }
1585
1586 void bnx2x_free_skbs(struct bnx2x *bp)
1587 {
1588         bnx2x_free_tx_skbs(bp);
1589         bnx2x_free_rx_skbs(bp);
1590 }
1591
1592 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1593 {
1594         /* load old values */
1595         u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1596
1597         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1598                 /* leave all but MAX value */
1599                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1600
1601                 /* set new MAX value */
1602                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1603                                 & FUNC_MF_CFG_MAX_BW_MASK;
1604
1605                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1606         }
1607 }
1608
1609 /**
1610  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1611  *
1612  * @bp:         driver handle
1613  * @nvecs:      number of vectors to be released
1614  */
1615 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1616 {
1617         int i, offset = 0;
1618
1619         if (nvecs == offset)
1620                 return;
1621
1622         /* VFs don't have a default SB */
1623         if (IS_PF(bp)) {
1624                 free_irq(bp->msix_table[offset].vector, bp->dev);
1625                 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1626                    bp->msix_table[offset].vector);
1627                 offset++;
1628         }
1629
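        /* The CNIC vector, if present, is not requested by bnx2x itself
         * (bnx2x_req_msix_irqs() skips it the same way), so it is only
         * stepped over here rather than freed.
         */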
1630         if (CNIC_SUPPORT(bp)) {
1631                 if (nvecs == offset)
1632                         return;
1633                 offset++;
1634         }
1635
1636         for_each_eth_queue(bp, i) {
1637                 if (nvecs == offset)
1638                         return;
1639                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1640                    i, bp->msix_table[offset].vector);
1641
1642                 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1643         }
1644 }
1645
1646 void bnx2x_free_irq(struct bnx2x *bp)
1647 {
1648         if (bp->flags & USING_MSIX_FLAG &&
1649             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1650                 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1651
1652                 /* vfs don't have a default status block */
1653                 if (IS_PF(bp))
1654                         nvecs++;
1655
1656                 bnx2x_free_msix_irqs(bp, nvecs);
1657         } else {
1658                 free_irq(bp->dev->irq, bp->dev);
1659         }
1660 }
1661
1662 int bnx2x_enable_msix(struct bnx2x *bp)
1663 {
1664         int msix_vec = 0, i, rc;
1665
1666         /* VFs don't have a default status block */
1667         if (IS_PF(bp)) {
1668                 bp->msix_table[msix_vec].entry = msix_vec;
1669                 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1670                                bp->msix_table[0].entry);
1671                 msix_vec++;
1672         }
1673
1674         /* Cnic requires an msix vector for itself */
1675         if (CNIC_SUPPORT(bp)) {
1676                 bp->msix_table[msix_vec].entry = msix_vec;
1677                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1678                                msix_vec, bp->msix_table[msix_vec].entry);
1679                 msix_vec++;
1680         }
1681
1682         /* We need separate vectors for ETH queues only (not FCoE) */
1683         for_each_eth_queue(bp, i) {
1684                 bp->msix_table[msix_vec].entry = msix_vec;
1685                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1686                                msix_vec, msix_vec, i);
1687                 msix_vec++;
1688         }
1689
1690         DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1691            msix_vec);
1692
1693         rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1694                                    BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1695         /*
1696          * reconfigure number of tx/rx queues according to available
1697          * MSI-X vectors
1698          */
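        /* Three outcomes are handled below: -ENOSPC falls back to a single
         * MSI-X vector (and a single ETH queue), any other negative rc gives
         * up on MSI-X entirely, and a positive rc smaller than the request
         * trims the ETH queue count to match the vectors actually granted.
         */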
1699         if (rc == -ENOSPC) {
1700                 /* Get by with single vector */
1701                 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1702                 if (rc < 0) {
1703                         BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1704                                        rc);
1705                         goto no_msix;
1706                 }
1707
1708                 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1709                 bp->flags |= USING_SINGLE_MSIX_FLAG;
1710
1711                 BNX2X_DEV_INFO("set number of queues to 1\n");
1712                 bp->num_ethernet_queues = 1;
1713                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1714         } else if (rc < 0) {
1715                 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1716                 goto no_msix;
1717         } else if (rc < msix_vec) {
1718                         /* how many fewer vectors will we have? */
1719                 int diff = msix_vec - rc;
1720
1721                 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1722
1723                 /*
1724                  * decrease number of queues by number of unallocated entries
1725                  */
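                /* For example, if 10 vectors were requested (slowpath + CNIC
                 * + 8 ETH queues) but only 7 were granted, diff is 3 and the
                 * driver continues with 5 ETH queues.
                 */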
1726                 bp->num_ethernet_queues -= diff;
1727                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1728
1729                 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1730                                bp->num_queues);
1731         }
1732
1733         bp->flags |= USING_MSIX_FLAG;
1734
1735         return 0;
1736
1737 no_msix:
1738         /* fall back to INTx if not enough memory */
1739         if (rc == -ENOMEM)
1740                 bp->flags |= DISABLE_MSI_FLAG;
1741
1742         return rc;
1743 }
1744
1745 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1746 {
1747         int i, rc, offset = 0;
1748
1749         /* no default status block for vf */
1750         if (IS_PF(bp)) {
1751                 rc = request_irq(bp->msix_table[offset++].vector,
1752                                  bnx2x_msix_sp_int, 0,
1753                                  bp->dev->name, bp->dev);
1754                 if (rc) {
1755                         BNX2X_ERR("request sp irq failed\n");
1756                         return -EBUSY;
1757                 }
1758         }
1759
1760         if (CNIC_SUPPORT(bp))
1761                 offset++;
1762
1763         for_each_eth_queue(bp, i) {
1764                 struct bnx2x_fastpath *fp = &bp->fp[i];
1765                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1766                          bp->dev->name, i);
1767
1768                 rc = request_irq(bp->msix_table[offset].vector,
1769                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1770                 if (rc) {
1771                         BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1772                               bp->msix_table[offset].vector, rc);
1773                         bnx2x_free_msix_irqs(bp, offset);
1774                         return -EBUSY;
1775                 }
1776
1777                 offset++;
1778         }
1779
1780         i = BNX2X_NUM_ETH_QUEUES(bp);
1781         if (IS_PF(bp)) {
1782                 offset = 1 + CNIC_SUPPORT(bp);
1783                 netdev_info(bp->dev,
1784                             "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1785                             bp->msix_table[0].vector,
1786                             0, bp->msix_table[offset].vector,
1787                             i - 1, bp->msix_table[offset + i - 1].vector);
1788         } else {
1789                 offset = CNIC_SUPPORT(bp);
1790                 netdev_info(bp->dev,
1791                             "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
1792                             0, bp->msix_table[offset].vector,
1793                             i - 1, bp->msix_table[offset + i - 1].vector);
1794         }
1795         return 0;
1796 }
1797
1798 int bnx2x_enable_msi(struct bnx2x *bp)
1799 {
1800         int rc;
1801
1802         rc = pci_enable_msi(bp->pdev);
1803         if (rc) {
1804                 BNX2X_DEV_INFO("MSI is not attainable\n");
1805                 return -1;
1806         }
1807         bp->flags |= USING_MSI_FLAG;
1808
1809         return 0;
1810 }
1811
1812 static int bnx2x_req_irq(struct bnx2x *bp)
1813 {
1814         unsigned long flags;
1815         unsigned int irq;
1816
1817         if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1818                 flags = 0;
1819         else
1820                 flags = IRQF_SHARED;
1821
1822         if (bp->flags & USING_MSIX_FLAG)
1823                 irq = bp->msix_table[0].vector;
1824         else
1825                 irq = bp->pdev->irq;
1826
1827         return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1828 }
1829
1830 static int bnx2x_setup_irqs(struct bnx2x *bp)
1831 {
1832         int rc = 0;
1833         if (bp->flags & USING_MSIX_FLAG &&
1834             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1835                 rc = bnx2x_req_msix_irqs(bp);
1836                 if (rc)
1837                         return rc;
1838         } else {
1839                 rc = bnx2x_req_irq(bp);
1840                 if (rc) {
1841                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1842                         return rc;
1843                 }
1844                 if (bp->flags & USING_MSI_FLAG) {
1845                         bp->dev->irq = bp->pdev->irq;
1846                         netdev_info(bp->dev, "using MSI IRQ %d\n",
1847                                     bp->dev->irq);
1848                 }
1849                 if (bp->flags & USING_MSIX_FLAG) {
1850                         bp->dev->irq = bp->msix_table[0].vector;
1851                         netdev_info(bp->dev, "using MSIX IRQ %d\n",
1852                                     bp->dev->irq);
1853                 }
1854         }
1855
1856         return 0;
1857 }
1858
1859 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1860 {
1861         int i;
1862
1863         for_each_rx_queue_cnic(bp, i) {
1864                 napi_enable(&bnx2x_fp(bp, i, napi));
1865         }
1866 }
1867
1868 static void bnx2x_napi_enable(struct bnx2x *bp)
1869 {
1870         int i;
1871
1872         for_each_eth_queue(bp, i) {
1873                 napi_enable(&bnx2x_fp(bp, i, napi));
1874         }
1875 }
1876
1877 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1878 {
1879         int i;
1880
1881         for_each_rx_queue_cnic(bp, i) {
1882                 napi_disable(&bnx2x_fp(bp, i, napi));
1883         }
1884 }
1885
1886 static void bnx2x_napi_disable(struct bnx2x *bp)
1887 {
1888         int i;
1889
1890         for_each_eth_queue(bp, i) {
1891                 napi_disable(&bnx2x_fp(bp, i, napi));
1892         }
1893 }
1894
1895 void bnx2x_netif_start(struct bnx2x *bp)
1896 {
1897         if (netif_running(bp->dev)) {
1898                 bnx2x_napi_enable(bp);
1899                 if (CNIC_LOADED(bp))
1900                         bnx2x_napi_enable_cnic(bp);
1901                 bnx2x_int_enable(bp);
1902                 if (bp->state == BNX2X_STATE_OPEN)
1903                         netif_tx_wake_all_queues(bp->dev);
1904         }
1905 }
1906
1907 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1908 {
1909         bnx2x_int_disable_sync(bp, disable_hw);
1910         bnx2x_napi_disable(bp);
1911         if (CNIC_LOADED(bp))
1912                 bnx2x_napi_disable_cnic(bp);
1913 }
1914
1915 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1916                        struct net_device *sb_dev,
1917                        select_queue_fallback_t fallback)
1918 {
1919         struct bnx2x *bp = netdev_priv(dev);
1920
1921         if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1922                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1923                 u16 ether_type = ntohs(hdr->h_proto);
1924
1925                 /* Skip VLAN tag if present */
1926                 if (ether_type == ETH_P_8021Q) {
1927                         struct vlan_ethhdr *vhdr =
1928                                 (struct vlan_ethhdr *)skb->data;
1929
1930                         ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1931                 }
1932
1933                 /* If ethertype is FCoE or FIP - use FCoE ring */
1934                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1935                         return bnx2x_fcoe_tx(bp, txq_index);
1936         }
1937
1938         /* select a non-FCoE queue */
1939         return fallback(dev, skb, NULL) % (BNX2X_NUM_ETH_QUEUES(bp));
1940 }
1941
1942 void bnx2x_set_num_queues(struct bnx2x *bp)
1943 {
1944         /* RSS queues */
1945         bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1946
1947         /* override in STORAGE SD modes */
1948         if (IS_MF_STORAGE_ONLY(bp))
1949                 bp->num_ethernet_queues = 1;
1950
1951         /* Add special queues */
1952         bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1953         bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1954
1955         BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1956 }
1957
1958 /**
1959  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1960  *
1961  * @bp:         Driver handle
1962  *
1963  * We currently support at most 16 Tx queues for each CoS, so we allocate a
1964  * multiple of 16 ETH L2 ring indices according to the value of
1965  * bp->max_cos.
1966  *
1967  * If there is an FCoE L2 queue, the corresponding Tx queue gets the next
1968  * index after all ETH L2 indices.
1969  *
1970  * If the actual number of Tx queues (for each CoS) is less than 16, there
1971  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1972  * 16..31, ...) that are not coupled with any real Tx queue.
1973  *
1974  * The proper configuration of skb->queue_mapping is handled by
1975  * bnx2x_select_queue() and __skb_tx_hash().
1976  *
1977  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1978  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1979  */
1980 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1981 {
1982         int rc, tx, rx;
1983
1984         tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1985         rx = BNX2X_NUM_ETH_QUEUES(bp);
1986
1987 /* account for fcoe queue */
1988         if (include_cnic && !NO_FCOE(bp)) {
1989                 rx++;
1990                 tx++;
1991         }
1992
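        /* Illustrative example: with 8 ETH queues, a max_cos of 3 and an
         * FCoE L2 queue present, this requests 8 * 3 + 1 = 25 Tx and
         * 8 + 1 = 9 Rx queues from the stack.
         */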
1993         rc = netif_set_real_num_tx_queues(bp->dev, tx);
1994         if (rc) {
1995                 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1996                 return rc;
1997         }
1998         rc = netif_set_real_num_rx_queues(bp->dev, rx);
1999         if (rc) {
2000                 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2001                 return rc;
2002         }
2003
2004         DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2005                           tx, rx);
2006
2007         return rc;
2008 }
2009
2010 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2011 {
2012         int i;
2013
2014         for_each_queue(bp, i) {
2015                 struct bnx2x_fastpath *fp = &bp->fp[i];
2016                 u32 mtu;
2017
2018                 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2019                 if (IS_FCOE_IDX(i))
2020                         /*
2021                          * Although no IP frames are expected to arrive on
2022                          * this ring, we still want to add an
2023                          * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2024                          * overrun attack.
2025                          */
2026                         mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2027                 else
2028                         mtu = bp->dev->mtu;
2029                 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2030                                   IP_HEADER_ALIGNMENT_PADDING +
2031                                   ETH_OVERHEAD +
2032                                   mtu +
2033                                   BNX2X_FW_RX_ALIGN_END;
2034                 fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
2035                 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
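                /* When the padded buffer still fits in a single page, a
                 * non-zero rx_frag_size lets bnx2x_frag_alloc() use the
                 * page-frag allocator; a zero rx_frag_size makes it fall
                 * back to kmalloc().
                 */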
2036                 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2037                         fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2038                 else
2039                         fp->rx_frag_size = 0;
2040         }
2041 }
2042
2043 static int bnx2x_init_rss(struct bnx2x *bp)
2044 {
2045         int i;
2046         u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2047
2048         /* Prepare the initial contents for the indirection table if RSS is
2049          * enabled
2050          */
2051         for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2052                 bp->rss_conf_obj.ind_table[i] =
2053                         bp->fp->cl_id +
2054                         ethtool_rxfh_indir_default(i, num_eth_queues);
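        /* ethtool_rxfh_indir_default(i, n) spreads entries round-robin
         * (i % n), so table entry i points at the client id of ETH queue
         * (i % num_eth_queues).
         */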
2055
2056         /*
2057          * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2058          * per-port, so if explicit configuration is needed, do it only
2059          * for a PMF.
2060          *
2061          * For 57712 and newer on the other hand it's a per-function
2062          * configuration.
2063          */
2064         return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2065 }
2066
2067 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2068               bool config_hash, bool enable)
2069 {
2070         struct bnx2x_config_rss_params params = {NULL};
2071
2072         /* Although RSS is meaningless when there is a single HW queue we
2073          * still need it enabled in order to have HW Rx hash generated.
2074          *
2075          * if (!is_eth_multi(bp))
2076          *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
2077          */
2078
2079         params.rss_obj = rss_obj;
2080
2081         __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2082
2083         if (enable) {
2084                 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2085
2086                 /* RSS configuration */
2087                 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2088                 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2089                 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2090                 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2091                 if (rss_obj->udp_rss_v4)
2092                         __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2093                 if (rss_obj->udp_rss_v6)
2094                         __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2095
2096                 if (!CHIP_IS_E1x(bp)) {
2097                         /* valid only for TUNN_MODE_VXLAN tunnel mode */
2098                         __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
2099                         __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
2100
2101                         /* valid only for TUNN_MODE_GRE tunnel mode */
2102                         __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
2103                 }
2104         } else {
2105                 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2106         }
2107
2108         /* Hash bits */
2109         params.rss_result_mask = MULTI_MASK;
2110
2111         memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2112
2113         if (config_hash) {
2114                 /* RSS keys */
2115                 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2116                 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2117         }
2118
2119         if (IS_PF(bp))
2120                 return bnx2x_config_rss(bp, &params);
2121         else
2122                 return bnx2x_vfpf_config_rss(bp, &params);
2123 }
2124
2125 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2126 {
2127         struct bnx2x_func_state_params func_params = {NULL};
2128
2129         /* Prepare parameters for function state transitions */
2130         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2131
2132         func_params.f_obj = &bp->func_obj;
2133         func_params.cmd = BNX2X_F_CMD_HW_INIT;
2134
2135         func_params.params.hw_init.load_phase = load_code;
2136
2137         return bnx2x_func_state_change(bp, &func_params);
2138 }
2139
2140 /*
2141  * Cleans the objects that have internal lists without sending
2142  * ramrods. Should be run when interrupts are disabled.
2143  */
2144 void bnx2x_squeeze_objects(struct bnx2x *bp)
2145 {
2146         int rc;
2147         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2148         struct bnx2x_mcast_ramrod_params rparam = {NULL};
2149         struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2150
2151         /***************** Cleanup MACs' object first *************************/
2152
2153         /* Wait for completion of the requested commands */
2154         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2155         /* Perform a dry cleanup */
2156         __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2157
2158         /* Clean ETH primary MAC */
2159         __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2160         rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2161                                  &ramrod_flags);
2162         if (rc != 0)
2163                 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2164
2165         /* Cleanup UC list */
2166         vlan_mac_flags = 0;
2167         __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2168         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2169                                  &ramrod_flags);
2170         if (rc != 0)
2171                 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2172
2173         /***************** Now clean mcast object *****************************/
2174         rparam.mcast_obj = &bp->mcast_obj;
2175         __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2176
2177         /* Add a DEL command. Since we're doing a driver cleanup only,
2178          * we take a lock surrounding both the initial send and the CONTs,
2179          * as we don't want a true completion to disrupt us in the middle.
2180          */
2181         netif_addr_lock_bh(bp->dev);
2182         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2183         if (rc < 0)
2184                 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2185                           rc);
2186
2187         /* ...and wait until all pending commands are cleared */
2188         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2189         while (rc != 0) {
2190                 if (rc < 0) {
2191                         BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2192                                   rc);
2193                         netif_addr_unlock_bh(bp->dev);
2194                         return;
2195                 }
2196
2197                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2198         }
2199         netif_addr_unlock_bh(bp->dev);
2200 }
2201
2202 #ifndef BNX2X_STOP_ON_ERROR
2203 #define LOAD_ERROR_EXIT(bp, label) \
2204         do { \
2205                 (bp)->state = BNX2X_STATE_ERROR; \
2206                 goto label; \
2207         } while (0)
2208
2209 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2210         do { \
2211                 bp->cnic_loaded = false; \
2212                 goto label; \
2213         } while (0)
2214 #else /*BNX2X_STOP_ON_ERROR*/
2215 #define LOAD_ERROR_EXIT(bp, label) \
2216         do { \
2217                 (bp)->state = BNX2X_STATE_ERROR; \
2218                 (bp)->panic = 1; \
2219                 return -EBUSY; \
2220         } while (0)
2221 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2222         do { \
2223                 bp->cnic_loaded = false; \
2224                 (bp)->panic = 1; \
2225                 return -EBUSY; \
2226         } while (0)
2227 #endif /*BNX2X_STOP_ON_ERROR*/
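/* When BNX2X_STOP_ON_ERROR is defined, these macros set the driver's debug
 * panic flag and return -EBUSY immediately instead of unwinding through the
 * cleanup labels.
 */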
2228
2229 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2230 {
2231         BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2232                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2233         return;
2234 }
2235
2236 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2237 {
2238         int num_groups, vf_headroom = 0;
2239         int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2240
2241         /* number of queues for statistics is number of eth queues + FCoE */
2242         u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2243
2244         /* Total number of FW statistics requests =
2245          * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2246          * and fcoe l2 queue) stats + num of queues (which includes another 1
2247          * for fcoe l2 queue if applicable)
2248          */
2249         bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2250
2251         /* vf stats appear in the request list, but their data is allocated by
2252          * the VFs themselves. We don't include them in the bp->fw_stats_num as
2253          * it is used to determine where to place the vf stats queries in the
2254          * request struct
2255          */
2256         if (IS_SRIOV(bp))
2257                 vf_headroom = bnx2x_vf_headroom(bp);
2258
2259         /* Request is built from stats_query_header and an array of
2260          * stats_query_cmd_group each of which contains
2261          * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2262          * configured in the stats_query_header.
2263          */
2264         num_groups =
2265                 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2266                  (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2267                  1 : 0));
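        /* This is a ceiling division: e.g. assuming a hypothetical
         * STATS_QUERY_CMD_COUNT of 16, 18 outstanding requests would need
         * two command groups.
         */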
2268
2269         DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2270            bp->fw_stats_num, vf_headroom, num_groups);
2271         bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2272                 num_groups * sizeof(struct stats_query_cmd_group);
2273
2274         /* Data for statistics requests + stats_counter
2275          * stats_counter holds per-STORM counters that are incremented
2276          * when STORM has finished with the current request.
2277          * memory for FCoE offloaded statistics is counted anyway,
2278          * even if they will not be sent.
2279          * VF stats are not accounted for here as the data of VF stats is stored
2280          * in memory allocated by the VF, not here.
2281          */
2282         bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2283                 sizeof(struct per_pf_stats) +
2284                 sizeof(struct fcoe_statistics_params) +
2285                 sizeof(struct per_queue_stats) * num_queue_stats +
2286                 sizeof(struct stats_counter);
2287
2288         bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2289                                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2290         if (!bp->fw_stats)
2291                 goto alloc_mem_err;
2292
2293         /* Set shortcuts */
2294         bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2295         bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2296         bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2297                 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2298         bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2299                 bp->fw_stats_req_sz;
2300
2301         DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2302            U64_HI(bp->fw_stats_req_mapping),
2303            U64_LO(bp->fw_stats_req_mapping));
2304         DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2305            U64_HI(bp->fw_stats_data_mapping),
2306            U64_LO(bp->fw_stats_data_mapping));
2307         return 0;
2308
2309 alloc_mem_err:
2310         bnx2x_free_fw_stats_mem(bp);
2311         BNX2X_ERR("Can't allocate FW stats memory\n");
2312         return -ENOMEM;
2313 }
2314
2315 /* send load request to mcp and analyze response */
2316 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2317 {
2318         u32 param;
2319
2320         /* init fw_seq */
2321         bp->fw_seq =
2322                 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2323                  DRV_MSG_SEQ_NUMBER_MASK);
2324         BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2325
2326         /* Get current FW pulse sequence */
2327         bp->fw_drv_pulse_wr_seq =
2328                 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2329                  DRV_PULSE_SEQ_MASK);
2330         BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2331
2332         param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2333
2334         if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2335                 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2336
2337         /* load request */
2338         (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2339
2340         /* if mcp fails to respond we must abort */
2341         if (!(*load_code)) {
2342                 BNX2X_ERR("MCP response failure, aborting\n");
2343                 return -EBUSY;
2344         }
2345
2346         /* If mcp refused (e.g. other port is in diagnostic mode) we
2347          * must abort
2348          */
2349         if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2350                 BNX2X_ERR("MCP refused load request, aborting\n");
2351                 return -EBUSY;
2352         }
2353         return 0;
2354 }
2355
2356 /* Check whether another PF has already loaded FW to the chip. In
2357  * virtualized environments a PF from another VM may have already
2358  * initialized the device, including loading the FW.
2359  */
2360 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2361 {
2362         /* is another pf loaded on this engine? */
2363         if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2364             load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2365                 /* build my FW version dword */
2366                 /*(DEBLOBBED)*/
2367
2368                 /* read loaded FW from chip */
2369                 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2370
2371                 u32 my_fw = ~loaded_fw;
2372
2373                 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2374                    loaded_fw, my_fw);
2375
2376                 /* abort nic load if version mismatch */
2377                 if (my_fw != loaded_fw) {
2378                         if (print_err)
2379                                 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2380                                           loaded_fw, my_fw);
2381                         else
2382                                 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2383                                                loaded_fw, my_fw);
2384                         return -EBUSY;
2385                 }
2386         }
2387         return 0;
2388 }
2389
2390 /* returns the "mcp load_code" according to global load_count array */
2391 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2392 {
2393         int path = BP_PATH(bp);
2394
2395         DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2396            path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2397            bnx2x_load_count[path][2]);
2398         bnx2x_load_count[path][0]++;
2399         bnx2x_load_count[path][1 + port]++;
2400         DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2401            path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2402            bnx2x_load_count[path][2]);
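        /* load_count[path][0] counts all functions loaded on this path and
         * load_count[path][1 + port] those loaded on this port: the first
         * function overall answers LOAD_COMMON, the first on its port
         * LOAD_PORT, and everyone else LOAD_FUNCTION.
         */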
2403         if (bnx2x_load_count[path][0] == 1)
2404                 return FW_MSG_CODE_DRV_LOAD_COMMON;
2405         else if (bnx2x_load_count[path][1 + port] == 1)
2406                 return FW_MSG_CODE_DRV_LOAD_PORT;
2407         else
2408                 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2409 }
2410
2411 /* mark PMF if applicable */
2412 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2413 {
2414         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2415             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2416             (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2417                 bp->port.pmf = 1;
2418                 /* We need the barrier to ensure the ordering between the
2419                  * writing to bp->port.pmf here and reading it from the
2420                  * bnx2x_periodic_task().
2421                  */
2422                 smp_mb();
2423         } else {
2424                 bp->port.pmf = 0;
2425         }
2426
2427         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2428 }
2429
2430 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2431 {
2432         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2433              (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2434             (bp->common.shmem2_base)) {
2435                 if (SHMEM2_HAS(bp, dcc_support))
2436                         SHMEM2_WR(bp, dcc_support,
2437                                   (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2438                                    SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2439                 if (SHMEM2_HAS(bp, afex_driver_support))
2440                         SHMEM2_WR(bp, afex_driver_support,
2441                                   SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2442         }
2443
2444         /* Set AFEX default VLAN tag to an invalid value */
2445         bp->afex_def_vlan_tag = -1;
2446 }
2447
2448 /**
2449  * bnx2x_bz_fp - zero content of the fastpath structure.
2450  *
2451  * @bp:         driver handle
2452  * @index:      fastpath index to be zeroed
2453  *
2454  * Makes sure the contents of the bp->fp[index].napi are kept
2455  * intact.
2456  */
2457 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2458 {
2459         struct bnx2x_fastpath *fp = &bp->fp[index];
2460         int cos;
2461         struct napi_struct orig_napi = fp->napi;
2462         struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2463
2464         /* bzero bnx2x_fastpath contents */
2465         if (fp->tpa_info)
2466                 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2467                        sizeof(struct bnx2x_agg_info));
2468         memset(fp, 0, sizeof(*fp));
2469
2470         /* Restore the NAPI object as it has been already initialized */
2471         fp->napi = orig_napi;
2472         fp->tpa_info = orig_tpa_info;
2473         fp->bp = bp;
2474         fp->index = index;
2475         if (IS_ETH_FP(fp))
2476                 fp->max_cos = bp->max_cos;
2477         else
2478                 /* Special queues support only one CoS */
2479                 fp->max_cos = 1;
2480
2481         /* Init txdata pointers */
2482         if (IS_FCOE_FP(fp))
2483                 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2484         if (IS_ETH_FP(fp))
2485                 for_each_cos_in_tx_queue(fp, cos)
2486                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2487                                 BNX2X_NUM_ETH_QUEUES(bp) + index];
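        /* For ETH queues the txdata blocks are laid out CoS-major, so CoS
         * "cos" of queue "index" lives at
         * bnx2x_txq[cos * BNX2X_NUM_ETH_QUEUES(bp) + index].
         */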
2488
2489         /* set the tpa flag for each queue. The tpa flag determines the queue
2490          * minimal size so it must be set prior to queue memory allocation
2491          */
2492         if (bp->dev->features & NETIF_F_LRO)
2493                 fp->mode = TPA_MODE_LRO;
2494         else if (bp->dev->features & NETIF_F_GRO_HW)
2495                 fp->mode = TPA_MODE_GRO;
2496         else
2497                 fp->mode = TPA_MODE_DISABLED;
2498
2499         /* We don't want TPA if it's disabled in bp
2500          * or if this is an FCoE L2 ring.
2501          */
2502         if (bp->disable_tpa || IS_FCOE_FP(fp))
2503                 fp->mode = TPA_MODE_DISABLED;
2504 }
2505
2506 void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2507 {
2508         u32 cur;
2509
2510         if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2511                 return;
2512
2513         cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2514         DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2515            cur, state);
2516
2517         SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2518 }
2519
2520 int bnx2x_load_cnic(struct bnx2x *bp)
2521 {
2522         int i, rc, port = BP_PORT(bp);
2523
2524         DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2525
2526         mutex_init(&bp->cnic_mutex);
2527
2528         if (IS_PF(bp)) {
2529                 rc = bnx2x_alloc_mem_cnic(bp);
2530                 if (rc) {
2531                         BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2532                         LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2533                 }
2534         }
2535
2536         rc = bnx2x_alloc_fp_mem_cnic(bp);
2537         if (rc) {
2538                 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2539                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2540         }
2541
2542         /* Update the number of queues with the cnic queues */
2543         rc = bnx2x_set_real_num_queues(bp, 1);
2544         if (rc) {
2545                 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2546                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2547         }
2548
2549         /* Add all CNIC NAPI objects */
2550         bnx2x_add_all_napi_cnic(bp);
2551         DP(NETIF_MSG_IFUP, "cnic napi added\n");
2552         bnx2x_napi_enable_cnic(bp);
2553
2554         rc = bnx2x_init_hw_func_cnic(bp);
2555         if (rc)
2556                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2557
2558         bnx2x_nic_init_cnic(bp);
2559
2560         if (IS_PF(bp)) {
2561                 /* Enable Timer scan */
2562                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2563
2564                 /* setup cnic queues */
2565                 for_each_cnic_queue(bp, i) {
2566                         rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2567                         if (rc) {
2568                                 BNX2X_ERR("Queue setup failed\n");
2569                                 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2570                         }
2571                 }
2572         }
2573
2574         /* Initialize Rx filter. */
2575         bnx2x_set_rx_mode_inner(bp);
2576
2577         /* re-read iscsi info */
2578         bnx2x_get_iscsi_info(bp);
2579         bnx2x_setup_cnic_irq_info(bp);
2580         bnx2x_setup_cnic_info(bp);
2581         bp->cnic_loaded = true;
2582         if (bp->state == BNX2X_STATE_OPEN)
2583                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2584
2585         DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2586
2587         return 0;
2588
2589 #ifndef BNX2X_STOP_ON_ERROR
2590 load_error_cnic2:
2591         /* Disable Timer scan */
2592         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2593
2594 load_error_cnic1:
2595         bnx2x_napi_disable_cnic(bp);
2596         /* Update the number of queues without the cnic queues */
2597         if (bnx2x_set_real_num_queues(bp, 0))
2598                 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2599 load_error_cnic0:
2600         BNX2X_ERR("CNIC-related load failed\n");
2601         bnx2x_free_fp_mem_cnic(bp);
2602         bnx2x_free_mem_cnic(bp);
2603         return rc;
2604 #endif /* ! BNX2X_STOP_ON_ERROR */
2605 }
2606
2607 /* must be called with rtnl_lock */
2608 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2609 {
2610         int port = BP_PORT(bp);
2611         int i, rc = 0, load_code = 0;
2612
2613         DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2614         DP(NETIF_MSG_IFUP,
2615            "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2616
2617 #ifdef BNX2X_STOP_ON_ERROR
2618         if (unlikely(bp->panic)) {
2619                 BNX2X_ERR("Can't load NIC when there is panic\n");
2620                 return -EPERM;
2621         }
2622 #endif
2623
2624         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2625
2626         /* zero the structure w/o any lock, before SP handler is initialized */
2627         memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2628         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2629                 &bp->last_reported_link.link_report_flags);
2630
2631         if (IS_PF(bp))
2632                 /* must be called before memory allocation and HW init */
2633                 bnx2x_ilt_set_info(bp);
2634
2635         /*
2636          * Zero fastpath structures, preserving invariants that are set up only
2637          * once: the napi object, fp index, max_cos and bp pointer.
2638          * Also set fp->mode and txdata_ptr.
2639          */
2640         DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2641         for_each_queue(bp, i)
2642                 bnx2x_bz_fp(bp, i);
2643         memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2644                                   bp->num_cnic_queues) *
2645                                   sizeof(struct bnx2x_fp_txdata));
2646
2647         bp->fcoe_init = false;
2648
2649         /* Set the receive queues buffer size */
2650         bnx2x_set_rx_buf_size(bp);
2651
2652         if (IS_PF(bp)) {
2653                 rc = bnx2x_alloc_mem(bp);
2654                 if (rc) {
2655                         BNX2X_ERR("Unable to allocate bp memory\n");
2656                         return rc;
2657                 }
2658         }
2659
2660         /* Needs to be done after alloc mem, since it's self-adjusting to the
2661          * amount of memory available for RSS queues
2662          */
2663         rc = bnx2x_alloc_fp_mem(bp);
2664         if (rc) {
2665                 BNX2X_ERR("Unable to allocate memory for fps\n");
2666                 LOAD_ERROR_EXIT(bp, load_error0);
2667         }
2668
2669         /* Allocate memory for FW statistics */
2670         rc = bnx2x_alloc_fw_stats_mem(bp);
2671         if (rc)
2672                 LOAD_ERROR_EXIT(bp, load_error0);
2673
2674         /* request pf to initialize status blocks */
2675         if (IS_VF(bp)) {
2676                 rc = bnx2x_vfpf_init(bp);
2677                 if (rc)
2678                         LOAD_ERROR_EXIT(bp, load_error0);
2679         }
2680
2681         /* Since bnx2x_alloc_mem() may update
2682          * bp->num_queues, bnx2x_set_real_num_queues() should always
2683          * come after it. At this stage cnic queues are not counted.
2684          */
2685         rc = bnx2x_set_real_num_queues(bp, 0);
2686         if (rc) {
2687                 BNX2X_ERR("Unable to set real_num_queues\n");
2688                 LOAD_ERROR_EXIT(bp, load_error0);
2689         }
2690
2691         /* configure multi cos mappings in kernel.
2692          * this configuration may be overridden by a multi class queue
2693          * discipline or by a dcbx negotiation result.
2694          */
2695         bnx2x_setup_tc(bp->dev, bp->max_cos);
2696
2697         /* Add all NAPI objects */
2698         bnx2x_add_all_napi(bp);
2699         DP(NETIF_MSG_IFUP, "napi added\n");
2700         bnx2x_napi_enable(bp);
2701
2702         if (IS_PF(bp)) {
2703                 /* set pf load just before approaching the MCP */
2704                 bnx2x_set_pf_load(bp);
2705
2706                 /* if mcp exists send load request and analyze response */
2707                 if (!BP_NOMCP(bp)) {
2708                         /* attempt to load pf */
2709                         rc = bnx2x_nic_load_request(bp, &load_code);
2710                         if (rc)
2711                                 LOAD_ERROR_EXIT(bp, load_error1);
2712
2713                         /* what did mcp say? */
2714                         rc = bnx2x_compare_fw_ver(bp, load_code, true);
2715                         if (rc) {
2716                                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2717                                 LOAD_ERROR_EXIT(bp, load_error2);
2718                         }
2719                 } else {
2720                         load_code = bnx2x_nic_load_no_mcp(bp, port);
2721                 }
2722
2723                 /* mark pmf if applicable */
2724                 bnx2x_nic_load_pmf(bp, load_code);
2725
2726                 /* Init Function state controlling object */
2727                 bnx2x__init_func_obj(bp);
2728
2729                 /* Initialize HW */
2730                 rc = bnx2x_init_hw(bp, load_code);
2731                 if (rc) {
2732                         BNX2X_ERR("HW init failed, aborting\n");
2733                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2734                         LOAD_ERROR_EXIT(bp, load_error2);
2735                 }
2736         }
2737
2738         bnx2x_pre_irq_nic_init(bp);
2739
2740         /* Connect to IRQs */
2741         rc = bnx2x_setup_irqs(bp);
2742         if (rc) {
2743                 BNX2X_ERR("setup irqs failed\n");
2744                 if (IS_PF(bp))
2745                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2746                 LOAD_ERROR_EXIT(bp, load_error2);
2747         }
2748
2749         /* Init per-function objects */
2750         if (IS_PF(bp)) {
2751                 /* Setup NIC internals and enable interrupts */
2752                 bnx2x_post_irq_nic_init(bp, load_code);
2753
2754                 bnx2x_init_bp_objs(bp);
2755                 bnx2x_iov_nic_init(bp);
2756
2757                 /* Set AFEX default VLAN tag to an invalid value */
2758                 bp->afex_def_vlan_tag = -1;
2759                 bnx2x_nic_load_afex_dcc(bp, load_code);
2760                 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2761                 rc = bnx2x_func_start(bp);
2762                 if (rc) {
2763                         BNX2X_ERR("Function start failed!\n");
2764                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2765
2766                         LOAD_ERROR_EXIT(bp, load_error3);
2767                 }
2768
2769                 /* Send LOAD_DONE command to MCP */
2770                 if (!BP_NOMCP(bp)) {
2771                         load_code = bnx2x_fw_command(bp,
2772                                                      DRV_MSG_CODE_LOAD_DONE, 0);
2773                         if (!load_code) {
2774                                 BNX2X_ERR("MCP response failure, aborting\n");
2775                                 rc = -EBUSY;
2776                                 LOAD_ERROR_EXIT(bp, load_error3);
2777                         }
2778                 }
2779
2780                 /* initialize FW coalescing state machines in RAM */
2781                 bnx2x_update_coalesce(bp);
2782         }
2783
2784         /* setup the leading queue */
2785         rc = bnx2x_setup_leading(bp);
2786         if (rc) {
2787                 BNX2X_ERR("Setup leading failed!\n");
2788                 LOAD_ERROR_EXIT(bp, load_error3);
2789         }
2790
2791         /* set up the rest of the queues */
2792         for_each_nondefault_eth_queue(bp, i) {
2793                 if (IS_PF(bp))
2794                         rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2795                 else /* VF */
2796                         rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2797                 if (rc) {
2798                         BNX2X_ERR("Queue %d setup failed\n", i);
2799                         LOAD_ERROR_EXIT(bp, load_error3);
2800                 }
2801         }
2802
2803         /* setup rss */
2804         rc = bnx2x_init_rss(bp);
2805         if (rc) {
2806                 BNX2X_ERR("PF RSS init failed\n");
2807                 LOAD_ERROR_EXIT(bp, load_error3);
2808         }
2809
2810         /* Now when Clients are configured we are ready to work */
2811         bp->state = BNX2X_STATE_OPEN;
2812
2813         /* Configure a ucast MAC */
2814         if (IS_PF(bp))
2815                 rc = bnx2x_set_eth_mac(bp, true);
2816         else /* vf */
2817                 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2818                                            true);
2819         if (rc) {
2820                 BNX2X_ERR("Setting Ethernet MAC failed\n");
2821                 LOAD_ERROR_EXIT(bp, load_error3);
2822         }
2823
2824         if (IS_PF(bp) && bp->pending_max) {
2825                 bnx2x_update_max_mf_config(bp, bp->pending_max);
2826                 bp->pending_max = 0;
2827         }
2828
2829         bp->force_link_down = false;
2830         if (bp->port.pmf) {
2831                 rc = bnx2x_initial_phy_init(bp, load_mode);
2832                 if (rc)
2833                         LOAD_ERROR_EXIT(bp, load_error3);
2834         }
2835         bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2836
2837         /* Start fast path */
2838
2839         /* Re-configure vlan filters */
2840         rc = bnx2x_vlan_reconfigure_vid(bp);
2841         if (rc)
2842                 LOAD_ERROR_EXIT(bp, load_error3);
2843
2844         /* Initialize Rx filter. */
2845         bnx2x_set_rx_mode_inner(bp);
2846
2847         if (bp->flags & PTP_SUPPORTED) {
2848                 bnx2x_init_ptp(bp);
2849                 bnx2x_configure_ptp_filters(bp);
2850         }
2851         /* Start Tx */
2852         switch (load_mode) {
2853         case LOAD_NORMAL:
2854                 /* Tx queues should only be re-enabled */
2855                 netif_tx_wake_all_queues(bp->dev);
2856                 break;
2857
2858         case LOAD_OPEN:
2859                 netif_tx_start_all_queues(bp->dev);
2860                 smp_mb__after_atomic();
2861                 break;
2862
2863         case LOAD_DIAG:
2864         case LOAD_LOOPBACK_EXT:
2865                 bp->state = BNX2X_STATE_DIAG;
2866                 break;
2867
2868         default:
2869                 break;
2870         }
2871
2872         if (bp->port.pmf)
2873                 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2874         else
2875                 bnx2x__link_status_update(bp);
2876
2877         /* start the timer */
2878         mod_timer(&bp->timer, jiffies + bp->current_interval);
2879
2880         if (CNIC_ENABLED(bp))
2881                 bnx2x_load_cnic(bp);
2882
2883         if (IS_PF(bp))
2884                 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2885
2886         if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2887                 /* mark that the driver is loaded in shmem2 */
2888                 u32 val;
2889                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2890                 val &= ~DRV_FLAGS_MTU_MASK;
2891                 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2892                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2893                           val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2894                           DRV_FLAGS_CAPABILITIES_LOADED_L2);
2895         }
2896
2897         /* Wait for all pending SP commands to complete */
2898         if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2899                 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2900                 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2901                 return -EBUSY;
2902         }
2903
2904         /* Update driver data for On-Chip MFW dump. */
2905         if (IS_PF(bp))
2906                 bnx2x_update_mfw_dump(bp);
2907
2908         /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2909         if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2910                 bnx2x_dcbx_init(bp, false);
2911
2912         if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2913                 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2914
2915         DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2916
2917         return 0;
2918
2919 #ifndef BNX2X_STOP_ON_ERROR
2920 load_error3:
2921         if (IS_PF(bp)) {
2922                 bnx2x_int_disable_sync(bp, 1);
2923
2924                 /* Clean queueable objects */
2925                 bnx2x_squeeze_objects(bp);
2926         }
2927
2928         /* Free SKBs, SGEs, TPA pool and driver internals */
2929         bnx2x_free_skbs(bp);
2930         for_each_rx_queue(bp, i)
2931                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2932
2933         /* Release IRQs */
2934         bnx2x_free_irq(bp);
2935 load_error2:
2936         if (IS_PF(bp) && !BP_NOMCP(bp)) {
2937                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2938                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2939         }
2940
2941         bp->port.pmf = 0;
2942 load_error1:
2943         bnx2x_napi_disable(bp);
2944         bnx2x_del_all_napi(bp);
2945
2946         /* clear pf_load status, as it was already set */
2947         if (IS_PF(bp))
2948                 bnx2x_clear_pf_load(bp);
2949 load_error0:
2950         bnx2x_free_fw_stats_mem(bp);
2951         bnx2x_free_fp_mem(bp);
2952         bnx2x_free_mem(bp);
2953
2954         return rc;
2955 #endif /* ! BNX2X_STOP_ON_ERROR */
2956 }
2957
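/**
 * bnx2x_drain_tx_queues - wait for the Tx fastpath tasks to complete.
 *
 * @bp:         driver handle
 *
 * Walks every Tx queue and every CoS ring within it and waits for
 * bnx2x_clean_tx_queue() to drain it; returns a non-zero status if a ring
 * fails to drain, 0 otherwise.
 */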
2958 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2959 {
2960         u8 rc = 0, cos, i;
2961
2962         /* Wait until tx fastpath tasks complete */
2963         for_each_tx_queue(bp, i) {
2964                 struct bnx2x_fastpath *fp = &bp->fp[i];
2965
2966                 for_each_cos_in_tx_queue(fp, cos)
2967                         rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2968                 if (rc)
2969                         return rc;
2970         }
2971         return 0;
2972 }
2973
2974 /* must be called with rtnl_lock */
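/**
 * bnx2x_nic_unload - stop the NIC and release its resources.
 *
 * @bp:          driver handle
 * @unload_mode: requested unload mode (e.g. UNLOAD_CLOSE, UNLOAD_RECOVERY)
 * @keep_link:   whether to keep the link configured across the unload
 *
 * Stops Tx, drains the queues (unless recovering), cleans up the chip (or,
 * for a VF, notifies the PF), frees SKBs, fastpath and driver memory, and
 * finally moves the device to BNX2X_STATE_CLOSED.
 */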
2975 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2976 {
2977         int i;
2978         bool global = false;
2979
2980         DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2981
2982         if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2983                 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2984
2985         /* mark that the driver is unloaded in shmem2 */
2986         if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2987                 u32 val;
2988                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2989                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2990                           val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2991         }
2992
2993         if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2994             (bp->state == BNX2X_STATE_CLOSED ||
2995              bp->state == BNX2X_STATE_ERROR)) {
2996                 /* We can get here if the driver has been unloaded
2997                  * during parity error recovery and is either waiting for a
2998                  * leader to complete or for other functions to unload and
2999                  * then ifdown has been issued. In this case we want to
3000                  * unload and let the other functions complete the recovery
3001                  * process.
3002                  */
3003                 bp->recovery_state = BNX2X_RECOVERY_DONE;
3004                 bp->is_leader = 0;
3005                 bnx2x_release_leader_lock(bp);
3006                 smp_mb();
3007
3008                 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3009                 BNX2X_ERR("Can't unload in closed or error state\n");
3010                 return -EINVAL;
3011         }
3012
3013         /* Nothing to do during unload if the previous bnx2x_nic_load()
3014          * did not complete successfully - all resources have been released.
3015          *
3016          * We can get here only after an unsuccessful ndo_* callback, during
3017          * which the dev->IFF_UP flag is still on.
3018          */
3019         if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3020                 return 0;
3021
3022         /* It's important to set bp->state to a value different from
3023          * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3024          * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3025          */
3026         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3027         smp_mb();
3028
3029         /* indicate to VFs that the PF is going down */
3030         bnx2x_iov_channel_down(bp);
3031
3032         if (CNIC_LOADED(bp))
3033                 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3034
3035         /* Stop Tx */
3036         bnx2x_tx_disable(bp);
3037         netdev_reset_tc(bp->dev);
3038
3039         bp->rx_mode = BNX2X_RX_MODE_NONE;
3040
3041         del_timer_sync(&bp->timer);
3042
3043         if (IS_PF(bp) && !BP_NOMCP(bp)) {
3044                 /* Set ALWAYS_ALIVE bit in shmem */
3045                 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3046                 bnx2x_drv_pulse(bp);
3047                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3048                 bnx2x_save_statistics(bp);
3049         }
3050
3051         /* wait till consumers catch up with producers in all queues.
3052          * If we're recovering, FW can't write to host so no reason
3053          * to wait for the queues to complete all Tx.
3054          */
3055         if (unload_mode != UNLOAD_RECOVERY)
3056                 bnx2x_drain_tx_queues(bp);
3057
3058         /* If VF, indicate to the PF that this function is going down
3059          * (the PF will delete the SP elements and clear the initializations).
3060          */
3061         if (IS_VF(bp)) {
3062                 bnx2x_clear_vlan_info(bp);
3063                 bnx2x_vfpf_close_vf(bp);
3064         } else if (unload_mode != UNLOAD_RECOVERY) {
3065                 /* if this is a normal/close unload, we need to clean up the chip */
3066                 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3067         } else {
3068                 /* Send the UNLOAD_REQUEST to the MCP */
3069                 bnx2x_send_unload_req(bp, unload_mode);
3070
3071                 /* In case of a global attention, prevent transactions to the
3072                  * host from the functions on the engine that does not reset the
3073                  * global blocks once the global blocks are reset and the gates
3074                  * are opened (the engine whose leader will perform the recovery
3075                  * last).
3076                  */
3077                 if (!CHIP_IS_E1x(bp))
3078                         bnx2x_pf_disable(bp);
3079
3080                 /* Disable HW interrupts, NAPI */
3081                 bnx2x_netif_stop(bp, 1);
3082                 /* Delete all NAPI objects */
3083                 bnx2x_del_all_napi(bp);
3084                 if (CNIC_LOADED(bp))
3085                         bnx2x_del_all_napi_cnic(bp);
3086                 /* Release IRQs */
3087                 bnx2x_free_irq(bp);
3088
3089                 /* Report UNLOAD_DONE to MCP */
3090                 bnx2x_send_unload_done(bp, false);
3091         }
3092
3093         /*
3094          * At this stage no more interrupts will arrive so we may safely clean
3095          * the queueable objects here in case they failed to get cleaned so far.
3096          */
3097         if (IS_PF(bp))
3098                 bnx2x_squeeze_objects(bp);
3099
3100         /* There should be no more pending SP commands at this stage */
3101         bp->sp_state = 0;
3102
3103         bp->port.pmf = 0;
3104
3105         /* clear pending work in rtnl task */
3106         bp->sp_rtnl_state = 0;
3107         smp_mb();
3108
3109         /* Free SKBs, SGEs, TPA pool and driver internals */
3110         bnx2x_free_skbs(bp);
3111         if (CNIC_LOADED(bp))
3112                 bnx2x_free_skbs_cnic(bp);
3113         for_each_rx_queue(bp, i)
3114                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3115
3116         bnx2x_free_fp_mem(bp);
3117         if (CNIC_LOADED(bp))
3118                 bnx2x_free_fp_mem_cnic(bp);
3119
3120         if (IS_PF(bp)) {
3121                 if (CNIC_LOADED(bp))
3122                         bnx2x_free_mem_cnic(bp);
3123         }
3124         bnx2x_free_mem(bp);
3125
3126         bp->state = BNX2X_STATE_CLOSED;
3127         bp->cnic_loaded = false;
3128
3129         /* Clear driver version indication in shmem */
3130         if (IS_PF(bp) && !BP_NOMCP(bp))
3131                 bnx2x_update_mng_version(bp);
3132
3133         /* Check if there are pending parity attentions. If there are - set
3134          * RECOVERY_IN_PROGRESS.
3135          */
3136         if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3137                 bnx2x_set_reset_in_progress(bp);
3138
3139                 /* Set RESET_IS_GLOBAL if needed */
3140                 if (global)
3141                         bnx2x_set_reset_global(bp);
3142         }
3143
3144         /* The last driver must disable the "close the gate" functionality if
3145          * there is no parity attention or "process kill" pending.
3146          */
3147         if (IS_PF(bp) &&
3148             !bnx2x_clear_pf_load(bp) &&
3149             bnx2x_reset_is_done(bp, BP_PATH(bp)))
3150                 bnx2x_disable_close_the_gate(bp);
3151
3152         DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3153
3154         return 0;
3155 }
3156
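/**
 * bnx2x_set_power_state - move the device between PCI power states.
 *
 * @bp:         driver handle
 * @state:      PCI_D0 or PCI_D3hot
 *
 * Programs the PCI PM control register directly. The transition to D3hot is
 * skipped when another client still has the device enabled or on
 * emulation/FPGA platforms, and PME is armed only when WoL is configured.
 */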
3157 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3158 {
3159         u16 pmcsr;
3160
3161         /* If there is no power capability, silently succeed */
3162         if (!bp->pdev->pm_cap) {
3163                 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3164                 return 0;
3165         }
3166
3167         pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3168
3169         switch (state) {
3170         case PCI_D0:
3171                 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3172                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3173                                        PCI_PM_CTRL_PME_STATUS));
3174
3175                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3176                         /* delay required during transition out of D3hot */
3177                         msleep(20);
3178                 break;
3179
3180         case PCI_D3hot:
3181                 /* If there are other clients above, don't
3182                  * shut down the power */
3183                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3184                         return 0;
3185                 /* Don't shut down the power for emulation and FPGA */
3186                 if (CHIP_REV_IS_SLOW(bp))
3187                         return 0;
3188
3189                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3190                 pmcsr |= 3;
3191
3192                 if (bp->wol)
3193                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3194
3195                 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3196                                       pmcsr);
3197
3198                 /* No more memory access after this point until
3199                  * the device is brought back to D0.
3200                  */
3201                 break;
3202
3203         default:
3204                 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3205                 return -EINVAL;
3206         }
3207         return 0;
3208 }
3209
3210 /*
3211  * net_device service functions
3212  */
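/**
 * bnx2x_poll - NAPI poll callback for a fastpath ring.
 *
 * @napi:       NAPI instance embedded in the fastpath structure
 * @budget:     maximum number of Rx packets to process
 *
 * Services the Tx completions of every CoS on this fastpath, processes up to
 * @budget Rx packets and, once no work is left, re-enables the status block
 * interrupt through the IGU.
 */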
3213 static int bnx2x_poll(struct napi_struct *napi, int budget)
3214 {
3215         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3216                                                  napi);
3217         struct bnx2x *bp = fp->bp;
3218         int rx_work_done;
3219         u8 cos;
3220
3221 #ifdef BNX2X_STOP_ON_ERROR
3222         if (unlikely(bp->panic)) {
3223                 napi_complete(napi);
3224                 return 0;
3225         }
3226 #endif
3227         for_each_cos_in_tx_queue(fp, cos)
3228                 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3229                         bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3230
3231         rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
3232
3233         if (rx_work_done < budget) {
3234                 /* No need to update SB for FCoE L2 ring as long as
3235                  * it's connected to the default SB and the SB
3236                  * has been updated when NAPI was scheduled.
3237                  */
3238                 if (IS_FCOE_FP(fp)) {
3239                         napi_complete_done(napi, rx_work_done);
3240                 } else {
3241                         bnx2x_update_fpsb_idx(fp);
3242                         /* bnx2x_has_rx_work() reads the status block,
3243                          * thus we need to ensure that status block indices
3244                          * have been actually read (bnx2x_update_fpsb_idx)
3245                          * prior to this check (bnx2x_has_rx_work) so that
3246                          * we won't write the "newer" value of the status block
3247                          * to IGU (if there was a DMA right after
3248                          * bnx2x_has_rx_work and if there is no rmb, the memory
3249                          * reading (bnx2x_update_fpsb_idx) may be postponed
3250                          * to right before bnx2x_ack_sb). In this case there
3251                          * will never be another interrupt until there is
3252                          * another update of the status block, while there
3253                          * is still unhandled work.
3254                          */
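                        /* The race, step by step (rough sketch):
                         *  1. the SB read in bnx2x_update_fpsb_idx() is
                         *     postponed by the CPU (no rmb yet);
                         *  2. bnx2x_has_rx_work() sees the old index - no work;
                         *  3. the device DMAs a newer SB index;
                         *  4. the postponed read now returns the newer index,
                         *     which bnx2x_ack_sb() writes to the IGU, so the
                         *     new work is never signalled again.
                         */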
3255                         rmb();
3256
3257                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3258                                 if (napi_complete_done(napi, rx_work_done)) {
3259                                         /* Re-enable interrupts */
3260                                         DP(NETIF_MSG_RX_STATUS,
3261                                            "Update index to %d\n", fp->fp_hc_idx);
3262                                         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3263                                                      le16_to_cpu(fp->fp_hc_idx),
3264                                                      IGU_INT_ENABLE, 1);
3265                                 }
3266                         } else {
3267                                 rx_work_done = budget;
3268                         }
3269                 }
3270         }
3271
3272         return rx_work_done;
3273 }
3274
3275 /* We split the first BD into a header BD and a data BD
3276  * to ease the pain of our fellow microcode engineers;
3277  * we use one mapping for both BDs.
3278  */
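/* Resulting layout (sketch), with one mapping covering the whole linear part:
 *
 *   start BD:    addr = mapping          nbytes = hlen            (headers)
 *   new data BD: addr = mapping + hlen   nbytes = old_len - hlen  (payload)
 *
 * The new data BD is flagged BNX2X_TSO_SPLIT_BD to record that it shares the
 * start BD's mapping.
 */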
3279 static u16 bnx2x_tx_split(struct bnx2x *bp,
3280                           struct bnx2x_fp_txdata *txdata,
3281                           struct sw_tx_bd *tx_buf,
3282                           struct eth_tx_start_bd **tx_bd, u16 hlen,
3283                           u16 bd_prod)
3284 {
3285         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3286         struct eth_tx_bd *d_tx_bd;
3287         dma_addr_t mapping;
3288         int old_len = le16_to_cpu(h_tx_bd->nbytes);
3289
3290         /* first fix first BD */
3291         h_tx_bd->nbytes = cpu_to_le16(hlen);
3292
3293         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3294            h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3295
3296         /* now get a new data BD
3297          * (after the pbd) and fill it */
3298         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3299         d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3300
3301         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3302                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3303
3304         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3305         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3306         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3307
3308         /* this marks the BD as one that has no individual mapping */
3309         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3310
3311         DP(NETIF_MSG_TX_QUEUED,
3312            "TSO split data size is %d (%x:%x)\n",
3313            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3314
3315         /* update tx_bd */
3316         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3317
3318         return bd_prod;
3319 }
3320
3321 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3322 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
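/* Fix up a partial checksum when the region the stack summed does not start
 * exactly at @t_header: a positive @fix removes the contribution of the @fix
 * bytes preceding @t_header, a negative @fix adds the bytes that were missed.
 * The result is returned byte-swapped, as the FW expects it.
 */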
3323 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3324 {
3325         __sum16 tsum = (__force __sum16) csum;
3326
3327         if (fix > 0)
3328                 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3329                                   csum_partial(t_header - fix, fix, 0)));
3330
3331         else if (fix < 0)
3332                 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3333                                   csum_partial(t_header, -fix, 0)));
3334
3335         return bswab16(tsum);
3336 }
3337
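/**
 * bnx2x_xmit_type - classify an outgoing skb into XMIT_* flags.
 *
 * @bp:         driver handle
 * @skb:        packet skb
 *
 * Returns XMIT_PLAIN when no checksum offload is requested; otherwise sets
 * the IPv4/IPv6 and TCP checksum bits, the _ENC_ variants for encapsulated
 * packets (non-E1x chips only) and the GSO bits when the skb is GSO.
 */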
3338 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3339 {
3340         u32 rc;
3341         __u8 prot = 0;
3342         __be16 protocol;
3343
3344         if (skb->ip_summed != CHECKSUM_PARTIAL)
3345                 return XMIT_PLAIN;
3346
3347         protocol = vlan_get_protocol(skb);
3348         if (protocol == htons(ETH_P_IPV6)) {
3349                 rc = XMIT_CSUM_V6;
3350                 prot = ipv6_hdr(skb)->nexthdr;
3351         } else {
3352                 rc = XMIT_CSUM_V4;
3353                 prot = ip_hdr(skb)->protocol;
3354         }
3355
3356         if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3357                 if (inner_ip_hdr(skb)->version == 6) {
3358                         rc |= XMIT_CSUM_ENC_V6;
3359                         if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3360                                 rc |= XMIT_CSUM_TCP;
3361                 } else {
3362                         rc |= XMIT_CSUM_ENC_V4;
3363                         if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3364                                 rc |= XMIT_CSUM_TCP;
3365                 }
3366         }
3367         if (prot == IPPROTO_TCP)
3368                 rc |= XMIT_CSUM_TCP;
3369
3370         if (skb_is_gso(skb)) {
3371                 if (skb_is_gso_v6(skb)) {
3372                         rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3373                         if (rc & XMIT_CSUM_ENC)
3374                                 rc |= XMIT_GSO_ENC_V6;
3375                 } else {
3376                         rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3377                         if (rc & XMIT_CSUM_ENC)
3378                                 rc |= XMIT_GSO_ENC_V4;
3379                 }
3380         }
3381
3382         return rc;
3383 }
3384
3385 /* VXLAN: 4 = 1 (for linear data BD) + 3 (2 parse BDs and the last BD) */
3386 #define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS         4
3387
3388 /* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3389 #define BNX2X_NUM_TSO_WIN_SUB_BDS               3
3390
3391 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3392 /* Check if the packet requires linearization (i.e. it is too fragmented).
3393  * No need to check fragmentation if the page size > 8K (there will be no
3394  * violation of the FW restrictions). */
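/* Roughly: the check below slides a window of (MAX_FETCH_BD - num_tso_win_sub)
 * consecutive BDs over the linear data and the frags; if any window carries
 * less than one MSS (gso_size) of payload, a single TSO segment could need
 * more BDs than the FW can fetch, so the skb has to be linearized first.
 */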
3395 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3396                              u32 xmit_type)
3397 {
3398         int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3399         int to_copy = 0, hlen = 0;
3400
3401         if (xmit_type & XMIT_GSO_ENC)
3402                 num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3403
3404         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3405                 if (xmit_type & XMIT_GSO) {
3406                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3407                         int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3408                         /* Number of windows to check */
3409                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3410                         int wnd_idx = 0;
3411                         int frag_idx = 0;
3412                         u32 wnd_sum = 0;
3413
3414                         /* Headers length */
3415                         if (xmit_type & XMIT_GSO_ENC)
3416                                 hlen = (int)(skb_inner_transport_header(skb) -
3417                                              skb->data) +
3418                                              inner_tcp_hdrlen(skb);
3419                         else
3420                                 hlen = (int)(skb_transport_header(skb) -
3421                                              skb->data) + tcp_hdrlen(skb);
3422
3423                         /* Amount of data (w/o headers) on the linear part of the SKB */
3424                         first_bd_sz = skb_headlen(skb) - hlen;
3425
3426                         wnd_sum  = first_bd_sz;
3427
3428                         /* Calculate the first sum - it's special */
3429                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3430                                 wnd_sum +=
3431                                         skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3432
3433                         /* If there was data in the linear part of the skb - check it */
3434                         if (first_bd_sz > 0) {
3435                                 if (unlikely(wnd_sum < lso_mss)) {
3436                                         to_copy = 1;
3437                                         goto exit_lbl;
3438                                 }
3439
3440                                 wnd_sum -= first_bd_sz;
3441                         }
3442
3443                         /* Others are easier: run through the frag list and
3444                            check all windows */
3445                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3446                                 wnd_sum +=
3447                           skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3448
3449                                 if (unlikely(wnd_sum < lso_mss)) {
3450                                         to_copy = 1;
3451                                         break;
3452                                 }
3453                                 wnd_sum -=
3454                                         skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3455                         }
3456                 } else {
3457                         /* in the non-LSO case, a too-fragmented packet should
3458                          * always be linearized */
3459                         to_copy = 1;
3460                 }
3461         }
3462
3463 exit_lbl:
3464         if (unlikely(to_copy))
3465                 DP(NETIF_MSG_TX_QUEUED,
3466                    "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
3467                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3468                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3469
3470         return to_copy;
3471 }
3472 #endif
3473
3474 /**
3475  * bnx2x_set_pbd_gso - update PBD in GSO case.
3476  *
3477  * @skb:        packet skb
3478  * @pbd:        parse BD
3479  * @xmit_type:  xmit flags
3480  */
3481 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3482                               struct eth_tx_parse_bd_e1x *pbd,
3483                               u32 xmit_type)
3484 {
3485         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3486         pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3487         pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3488
3489         if (xmit_type & XMIT_GSO_V4) {
3490                 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3491                 pbd->tcp_pseudo_csum =
3492                         bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3493                                                    ip_hdr(skb)->daddr,
3494                                                    0, IPPROTO_TCP, 0));
3495         } else {
3496                 pbd->tcp_pseudo_csum =
3497                         bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3498                                                  &ipv6_hdr(skb)->daddr,
3499                                                  0, IPPROTO_TCP, 0));
3500         }
3501
3502         pbd->global_data |=
3503                 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3504 }
3505
3506 /**
3507  * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3508  *
3509  * @bp:                 driver handle
3510  * @skb:                packet skb
3511  * @parsing_data:       data to be updated
3512  * @xmit_type:          xmit flags
3513  *
3514  * 57712/578xx related, when skb has encapsulation
3515  */
3516 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3517                                  u32 *parsing_data, u32 xmit_type)
3518 {
3519         *parsing_data |=
3520                 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3521                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3522                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3523
3524         if (xmit_type & XMIT_CSUM_TCP) {
3525                 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3526                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3527                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3528
3529                 return skb_inner_transport_header(skb) +
3530                         inner_tcp_hdrlen(skb) - skb->data;
3531         }
3532
3533         /* We support checksum offload for TCP and UDP only.
3534          * No need to pass the UDP header length - it's a constant.
3535          */
3536         return skb_inner_transport_header(skb) +
3537                 sizeof(struct udphdr) - skb->data;
3538 }
3539
3540 /**
3541  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3542  *
3543  * @bp:                 driver handle
3544  * @skb:                packet skb
3545  * @parsing_data:       data to be updated
3546  * @xmit_type:          xmit flags
3547  *
3548  * 57712/578xx related
3549  */
3550 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3551                                 u32 *parsing_data, u32 xmit_type)
3552 {
3553         *parsing_data |=
3554                 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3555                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3556                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3557
3558         if (xmit_type & XMIT_CSUM_TCP) {
3559                 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3560                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3561                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3562
3563                 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3564         }
3565         /* We support checksum offload for TCP and UDP only.
3566          * No need to pass the UDP header length - it's a constant.
3567          */
3568         return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3569 }
3570
3571 /* set FW indication according to inner or outer protocols if tunneled */
3572 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3573                                struct eth_tx_start_bd *tx_start_bd,
3574                                u32 xmit_type)
3575 {
3576         tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3577
3578         if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3579                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3580
3581         if (!(xmit_type & XMIT_CSUM_TCP))
3582                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3583 }
3584
3585 /**
3586  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3587  *
3588  * @bp:         driver handle
3589  * @skb:        packet skb
3590  * @pbd:        parse BD to be updated
3591  * @xmit_type:  xmit flags
3592  */
3593 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3594                              struct eth_tx_parse_bd_e1x *pbd,
3595                              u32 xmit_type)
3596 {
3597         u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3598
3599         /* for now NS flag is not used in Linux */
3600         pbd->global_data =
3601                 cpu_to_le16(hlen |
3602                             ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3603                              ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3604
3605         pbd->ip_hlen_w = (skb_transport_header(skb) -
3606                         skb_network_header(skb)) >> 1;
3607
3608         hlen += pbd->ip_hlen_w;
3609
3610         /* We support checksum offload for TCP and UDP only */
3611         if (xmit_type & XMIT_CSUM_TCP)
3612                 hlen += tcp_hdrlen(skb) / 2;
3613         else
3614                 hlen += sizeof(struct udphdr) / 2;
3615
3616         pbd->total_hlen_w = cpu_to_le16(hlen);
3617         hlen = hlen*2;
3618
3619         if (xmit_type & XMIT_CSUM_TCP) {
3620                 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3621
3622         } else {
3623                 s8 fix = SKB_CS_OFF(skb); /* signed! */
3624
3625                 DP(NETIF_MSG_TX_QUEUED,
3626                    "hlen %d  fix %d  csum before fix %x\n",
3627                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3628
3629                 /* HW bug: fixup the CSUM */
3630                 pbd->tcp_pseudo_csum =
3631                         bnx2x_csum_fix(skb_transport_header(skb),
3632                                        SKB_CS(skb), fix);
3633
3634                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3635                    pbd->tcp_pseudo_csum);
3636         }
3637
3638         return hlen;
3639 }
3640
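/**
 * bnx2x_update_pbds_gso_enc - fill the parsing BDs for a tunneled GSO packet.
 *
 * @skb:                packet skb
 * @pbd_e2:             parse BD to be updated
 * @pbd2:               second parse BD to be updated
 * @global_data:        global data of the start BD to be updated
 * @xmit_type:          xmit flags
 *
 * Records the outer IP header offset/length and checksum (or the IPv6 outer
 * flag), the inner TCP sequence/flags and the inner pseudo checksum that the
 * FW needs to perform LSO on an encapsulated (e.g. VXLAN) packet.
 */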
3641 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3642                                       struct eth_tx_parse_bd_e2 *pbd_e2,
3643                                       struct eth_tx_parse_2nd_bd *pbd2,
3644                                       u16 *global_data,
3645                                       u32 xmit_type)
3646 {
3647         u16 hlen_w = 0;
3648         u8 outerip_off, outerip_len = 0;
3649
3650         /* from outer IP to transport */
3651         hlen_w = (skb_inner_transport_header(skb) -
3652                   skb_network_header(skb)) >> 1;
3653
3654         /* transport len */
3655         hlen_w += inner_tcp_hdrlen(skb) >> 1;
3656
3657         pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3658
3659         /* outer IP header info */
3660         if (xmit_type & XMIT_CSUM_V4) {
3661                 struct iphdr *iph = ip_hdr(skb);
3662                 u32 csum = (__force u32)(~iph->check) -
3663                            (__force u32)iph->tot_len -
3664                            (__force u32)iph->frag_off;
3665
3666                 outerip_len = iph->ihl << 1;
3667
3668                 pbd2->fw_ip_csum_wo_len_flags_frag =
3669                         bswab16(csum_fold((__force __wsum)csum));
3670         } else {
3671                 pbd2->fw_ip_hdr_to_payload_w =
3672                         hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3673                 pbd_e2->data.tunnel_data.flags |=
3674                         ETH_TUNNEL_DATA_IPV6_OUTER;
3675         }
3676
3677         pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3678
3679         pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3680
3681         /* inner IP header info */
3682         if (xmit_type & XMIT_CSUM_ENC_V4) {
3683                 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3684
3685                 pbd_e2->data.tunnel_data.pseudo_csum =
3686                         bswab16(~csum_tcpudp_magic(
3687                                         inner_ip_hdr(skb)->saddr,
3688                                         inner_ip_hdr(skb)->daddr,
3689                                         0, IPPROTO_TCP, 0));
3690         } else {
3691                 pbd_e2->data.tunnel_data.pseudo_csum =
3692                         bswab16(~csum_ipv6_magic(
3693                                         &inner_ipv6_hdr(skb)->saddr,
3694                                         &inner_ipv6_hdr(skb)->daddr,
3695                                         0, IPPROTO_TCP, 0));
3696         }
3697
3698         outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3699
3700         *global_data |=
3701                 outerip_off |
3702                 (outerip_len <<
3703                         ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3704                 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3705                         ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3706
3707         if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3708                 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3709                 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3710         }
3711 }
3712
3713 static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3714                                          u32 xmit_type)
3715 {
3716         struct ipv6hdr *ipv6;
3717
3718         if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3719                 return;
3720
3721         if (xmit_type & XMIT_GSO_ENC_V6)
3722                 ipv6 = inner_ipv6_hdr(skb);
3723         else /* XMIT_GSO_V6 */
3724                 ipv6 = ipv6_hdr(skb);
3725
3726         if (ipv6->nexthdr == NEXTHDR_IPV6)
3727                 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3728 }
3729
3730 /* called with netif_tx_lock
3731  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3732  * netif_wake_queue()
3733  */
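/* High-level flow (sketch):
 *   1. linearize the skb if it would violate the FW fragmentation limits;
 *   2. DMA-map the linear data and build the start BD;
 *   3. build the parsing BD(s) for checksum/TSO (plus tunnel data on E2+);
 *   4. for TSO, optionally split the first BD into header and payload BDs;
 *   5. map and chain one data BD per frag;
 *   6. write nbd into the first BD and ring the doorbell.
 */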
3734 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3735 {
3736         struct bnx2x *bp = netdev_priv(dev);
3737
3738         struct netdev_queue *txq;
3739         struct bnx2x_fp_txdata *txdata;
3740         struct sw_tx_bd *tx_buf;
3741         struct eth_tx_start_bd *tx_start_bd, *first_bd;
3742         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3743         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3744         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3745         struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3746         u32 pbd_e2_parsing_data = 0;
3747         u16 pkt_prod, bd_prod;
3748         int nbd, txq_index;
3749         dma_addr_t mapping;
3750         u32 xmit_type = bnx2x_xmit_type(bp, skb);
3751         int i;
3752         u8 hlen = 0;
3753         __le16 pkt_size = 0;
3754         struct ethhdr *eth;
3755         u8 mac_type = UNICAST_ADDRESS;
3756
3757 #ifdef BNX2X_STOP_ON_ERROR
3758         if (unlikely(bp->panic))
3759                 return NETDEV_TX_BUSY;
3760 #endif
3761
3762         txq_index = skb_get_queue_mapping(skb);
3763         txq = netdev_get_tx_queue(dev, txq_index);
3764
3765         BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3766
3767         txdata = &bp->bnx2x_txq[txq_index];
3768
3769         /* enable this debug print to view the transmission queue being used
3770         DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3771            txq_index, fp_index, txdata_index); */
3772
3773         /* enable this debug print to view the transmission details
3774         DP(NETIF_MSG_TX_QUEUED,
3775            "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3776            txdata->cid, fp_index, txdata_index, txdata, fp); */
3777
3778         if (unlikely(bnx2x_tx_avail(bp, txdata) <
3779                         skb_shinfo(skb)->nr_frags +
3780                         BDS_PER_TX_PKT +
3781                         NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3782                 /* Handle special storage cases separately */
3783                 if (txdata->tx_ring_size == 0) {
3784                         struct bnx2x_eth_q_stats *q_stats =
3785                                 bnx2x_fp_qstats(bp, txdata->parent_fp);
3786                         q_stats->driver_filtered_tx_pkt++;
3787                         dev_kfree_skb(skb);
3788                         return NETDEV_TX_OK;
3789                 }
3790                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3791                 netif_tx_stop_queue(txq);
3792                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3793
3794                 return NETDEV_TX_BUSY;
3795         }
3796
3797         DP(NETIF_MSG_TX_QUEUED,
3798            "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
3799            txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3800            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3801            skb->len);
3802
3803         eth = (struct ethhdr *)skb->data;
3804
3805         /* set flag according to packet type (UNICAST_ADDRESS is the default) */
3806         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3807                 if (is_broadcast_ether_addr(eth->h_dest))
3808                         mac_type = BROADCAST_ADDRESS;
3809                 else
3810                         mac_type = MULTICAST_ADDRESS;
3811         }
3812
3813 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3814         /* First, check if we need to linearize the skb (due to FW
3815          * restrictions). No need to check fragmentation if the page size > 8K
3816          * (there will be no violation of the FW restrictions). */
3817         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3818                 /* Statistics of linearization */
3819                 bp->lin_cnt++;
3820                 if (skb_linearize(skb) != 0) {
3821                         DP(NETIF_MSG_TX_QUEUED,
3822                            "SKB linearization failed - silently dropping this SKB\n");
3823                         dev_kfree_skb_any(skb);
3824                         return NETDEV_TX_OK;
3825                 }
3826         }
3827 #endif
3828         /* Map skb linear data for DMA */
3829         mapping = dma_map_single(&bp->pdev->dev, skb->data,
3830                                  skb_headlen(skb), DMA_TO_DEVICE);
3831         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3832                 DP(NETIF_MSG_TX_QUEUED,
3833                    "SKB mapping failed - silently dropping this SKB\n");
3834                 dev_kfree_skb_any(skb);
3835                 return NETDEV_TX_OK;
3836         }
3837         /*
3838          * Please read carefully. First we use one BD which we mark as start,
3839          * then we have a parsing info BD (used for TSO or checksum offload),
3840          * and only then we have the rest of the TSO BDs.
3841          * (Don't forget to mark the last one as last,
3842          * and to unmap only AFTER you write to the BD ...)
3843          * And above all, all PBD sizes are in words - NOT DWORDS!
3844          */
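        /* nbd bookkeeping for the chain built below (sketch): it starts at 2
         * (start BD + parse BD), is bumped for the 2nd parse BD on tunneled
         * packets, for the TSO header/payload split BD, for every frag BD and,
         * just before the doorbell, for the "next page" BD if the chain
         * crosses a Tx ring page boundary.
         */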
3845
3846         /* get current pkt produced now - advance it just before sending packet
3847          * since mapping of pages may fail and cause packet to be dropped
3848          */
3849         pkt_prod = txdata->tx_pkt_prod;
3850         bd_prod = TX_BD(txdata->tx_bd_prod);
3851
3852         /* get a tx_buf and first BD
3853          * tx_start_bd may be changed during SPLIT,
3854          * but first_bd will always stay first
3855          */
3856         tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3857         tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3858         first_bd = tx_start_bd;
3859
3860         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3861
3862         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3863                 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3864                         bp->eth_stats.ptp_skip_tx_ts++;
3865                         BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3866                 } else if (bp->ptp_tx_skb) {
3867                         bp->eth_stats.ptp_skip_tx_ts++;
3868                         netdev_err_once(bp->dev,
3869                                         "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
3870                 } else {
3871                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3872                         /* schedule check for Tx timestamp */
3873                         bp->ptp_tx_skb = skb_get(skb);
3874                         bp->ptp_tx_start = jiffies;
3875                         schedule_work(&bp->ptp_task);
3876                 }
3877         }
3878
3879         /* header nbd: indirectly zero other flags! */
3880         tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3881
3882         /* remember the first BD of the packet */
3883         tx_buf->first_bd = txdata->tx_bd_prod;
3884         tx_buf->skb = skb;
3885         tx_buf->flags = 0;
3886
3887         DP(NETIF_MSG_TX_QUEUED,
3888            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3889            pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3890
3891         if (skb_vlan_tag_present(skb)) {
3892                 tx_start_bd->vlan_or_ethertype =
3893                     cpu_to_le16(skb_vlan_tag_get(skb));
3894                 tx_start_bd->bd_flags.as_bitfield |=
3895                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3896         } else {
3897                 /* when transmitting on a VF, the start BD must hold the
3898                  * ethertype for the FW to enforce it
3899                  */
3900                 u16 vlan_tci = 0;
3901 #ifndef BNX2X_STOP_ON_ERROR
3902                 if (IS_VF(bp)) {
3903 #endif
3904                         /* Still need to consider the inband VLAN for enforcement */
3905                         if (__vlan_get_tag(skb, &vlan_tci)) {
3906                                 tx_start_bd->vlan_or_ethertype =
3907                                         cpu_to_le16(ntohs(eth->h_proto));
3908                         } else {
3909                                 tx_start_bd->bd_flags.as_bitfield |=
3910                                         (X_ETH_INBAND_VLAN <<
3911                                          ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3912                                 tx_start_bd->vlan_or_ethertype =
3913                                         cpu_to_le16(vlan_tci);
3914                         }
3915 #ifndef BNX2X_STOP_ON_ERROR
3916                 } else {
3917                         /* used by FW for packet accounting */
3918                         tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3919                 }
3920 #endif
3921         }
3922
3923         nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3924
3925         /* turn on parsing and get a BD */
3926         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3927
3928         if (xmit_type & XMIT_CSUM)
3929                 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3930
3931         if (!CHIP_IS_E1x(bp)) {
3932                 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3933                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3934
3935                 if (xmit_type & XMIT_CSUM_ENC) {
3936                         u16 global_data = 0;
3937
3938                         /* Set PBD in enc checksum offload case */
3939                         hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3940                                                       &pbd_e2_parsing_data,
3941                                                       xmit_type);
3942
3943                         /* turn on 2nd parsing and get a BD */
3944                         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3945
3946                         pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3947
3948                         memset(pbd2, 0, sizeof(*pbd2));
3949
3950                         pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3951                                 (skb_inner_network_header(skb) -
3952                                  skb->data) >> 1;
3953
3954                         if (xmit_type & XMIT_GSO_ENC)
3955                                 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3956                                                           &global_data,
3957                                                           xmit_type);
3958
3959                         pbd2->global_data = cpu_to_le16(global_data);
3960
3961                         /* add the additional parse BD indication to the start BD */
3962                         SET_FLAG(tx_start_bd->general_data,
3963                                  ETH_TX_START_BD_PARSE_NBDS, 1);
3964                         /* set encapsulation flag in start BD */
3965                         SET_FLAG(tx_start_bd->general_data,
3966                                  ETH_TX_START_BD_TUNNEL_EXIST, 1);
3967
3968                         tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3969
3970                         nbd++;
3971                 } else if (xmit_type & XMIT_CSUM) {
3972                         /* Set PBD in checksum offload case w/o encapsulation */
3973                         hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3974                                                      &pbd_e2_parsing_data,
3975                                                      xmit_type);
3976                 }
3977
3978                 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3979                 /* Add the macs to the parsing BD if this is a vf or if
3980                  * Tx Switching is enabled.
3981                  */
3982                 if (IS_VF(bp)) {
3983                         /* override GRE parameters in BD */
3984                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3985                                               &pbd_e2->data.mac_addr.src_mid,
3986                                               &pbd_e2->data.mac_addr.src_lo,
3987                                               eth->h_source);
3988
3989                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3990                                               &pbd_e2->data.mac_addr.dst_mid,
3991                                               &pbd_e2->data.mac_addr.dst_lo,
3992                                               eth->h_dest);
3993                 } else {
3994                         if (bp->flags & TX_SWITCHING)
3995                                 bnx2x_set_fw_mac_addr(
3996                                                 &pbd_e2->data.mac_addr.dst_hi,
3997                                                 &pbd_e2->data.mac_addr.dst_mid,
3998                                                 &pbd_e2->data.mac_addr.dst_lo,
3999                                                 eth->h_dest);
4000 #ifdef BNX2X_STOP_ON_ERROR
4001                         /* Enforce security is always set in Stop on Error -
4002                          * source mac should be present in the parsing BD
4003                          */
4004                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4005                                               &pbd_e2->data.mac_addr.src_mid,
4006                                               &pbd_e2->data.mac_addr.src_lo,
4007                                               eth->h_source);
4008 #endif
4009                 }
4010
4011                 SET_FLAG(pbd_e2_parsing_data,
4012                          ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4013         } else {
4014                 u16 global_data = 0;
4015                 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4016                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4017                 /* Set PBD in checksum offload case */
4018                 if (xmit_type & XMIT_CSUM)
4019                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4020
4021                 SET_FLAG(global_data,
4022                          ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4023                 pbd_e1x->global_data |= cpu_to_le16(global_data);
4024         }
4025
4026         /* Setup the data pointer of the first BD of the packet */
4027         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4028         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4029         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4030         pkt_size = tx_start_bd->nbytes;
4031
4032         DP(NETIF_MSG_TX_QUEUED,
4033            "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
4034            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4035            le16_to_cpu(tx_start_bd->nbytes),
4036            tx_start_bd->bd_flags.as_bitfield,
4037            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4038
4039         if (xmit_type & XMIT_GSO) {
4040
4041                 DP(NETIF_MSG_TX_QUEUED,
4042                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
4043                    skb->len, hlen, skb_headlen(skb),
4044                    skb_shinfo(skb)->gso_size);
4045
4046                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4047
4048                 if (unlikely(skb_headlen(skb) > hlen)) {
4049                         nbd++;
4050                         bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4051                                                  &tx_start_bd, hlen,
4052                                                  bd_prod);
4053                 }
4054                 if (!CHIP_IS_E1x(bp))
4055                         pbd_e2_parsing_data |=
4056                                 (skb_shinfo(skb)->gso_size <<
4057                                  ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4058                                  ETH_TX_PARSE_BD_E2_LSO_MSS;
4059                 else
4060                         bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4061         }
4062
4063         /* Set the PBD's parsing_data field if not zero
4064          * (for the chips newer than 57711).
4065          */
4066         if (pbd_e2_parsing_data)
4067                 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4068
4069         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4070
4071         /* Handle fragmented skb */
4072         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4073                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4074
4075                 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4076                                            skb_frag_size(frag), DMA_TO_DEVICE);
4077                 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4078                         unsigned int pkts_compl = 0, bytes_compl = 0;
4079
4080                         DP(NETIF_MSG_TX_QUEUED,
4081                            "Unable to map page - dropping packet...\n");
4082
4083                         /* we need to unmap all buffers already mapped
4084                          * for this SKB;
4085                          * first_bd->nbd needs to be properly updated
4086                          * before the call to bnx2x_free_tx_pkt
4087                          */
4088                         first_bd->nbd = cpu_to_le16(nbd);
4089                         bnx2x_free_tx_pkt(bp, txdata,
4090                                           TX_BD(txdata->tx_pkt_prod),
4091                                           &pkts_compl, &bytes_compl);
4092                         return NETDEV_TX_OK;
4093                 }
4094
4095                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4096                 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4097                 if (total_pkt_bd == NULL)
4098                         total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4099
4100                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4101                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4102                 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4103                 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4104                 nbd++;
4105
4106                 DP(NETIF_MSG_TX_QUEUED,
4107                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
4108                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4109                    le16_to_cpu(tx_data_bd->nbytes));
4110         }
4111
4112         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4113
4114         /* update with actual num BDs */
4115         first_bd->nbd = cpu_to_le16(nbd);
4116
4117         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4118
4119         /* now send a tx doorbell, counting the next BD
4120          * if the packet contains or ends with it
4121          */
4122         if (TX_BD_POFF(bd_prod) < nbd)
4123                 nbd++;
4124
4125         /* total_pkt_bytes should be set on the first data BD if
4126          * it's not an LSO packet and there is more than one
4127          * data BD. In this case pkt_size is limited by an MTU value.
4128          * However, we prefer to set it for an LSO packet (while we don't
4129          * have to) in order to save some CPU cycles in the non-LSO
4130          * case, where we care much more about them.
4131          */
4132         if (total_pkt_bd != NULL)
4133                 total_pkt_bd->total_pkt_bytes = pkt_size;
4134
4135         if (pbd_e1x)
4136                 DP(NETIF_MSG_TX_QUEUED,
4137                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
4138                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4139                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4140                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4141                    le16_to_cpu(pbd_e1x->total_hlen_w));
4142         if (pbd_e2)
4143                 DP(NETIF_MSG_TX_QUEUED,
4144                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
4145                    pbd_e2,
4146                    pbd_e2->data.mac_addr.dst_hi,
4147                    pbd_e2->data.mac_addr.dst_mid,
4148                    pbd_e2->data.mac_addr.dst_lo,
4149                    pbd_e2->data.mac_addr.src_hi,
4150                    pbd_e2->data.mac_addr.src_mid,
4151                    pbd_e2->data.mac_addr.src_lo,
4152                    pbd_e2->parsing_data);
4153         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
4154
4155         netdev_tx_sent_queue(txq, skb->len);
4156
4157         skb_tx_timestamp(skb);
4158
4159         txdata->tx_pkt_prod++;
4160         /*
4161          * Make sure that the BD data is updated before updating the producer
4162          * since FW might read the BD right after the producer is updated.
4163          * This is only applicable for weak-ordered memory model archs such
4164          * as IA-64. The following barrier is also mandatory since FW
4165          * assumes packets must have BDs.
4166          */
4167         wmb();
4168
4169         txdata->tx_db.data.prod += nbd;
4170         /* make sure descriptor update is observed by HW */
4171         wmb();
4172
4173         DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
4174
4175         mmiowb();
4176
4177         txdata->tx_bd_prod += nbd;
4178
4179         if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4180                 netif_tx_stop_queue(txq);
4181
4182                 /* paired memory barrier is in bnx2x_tx_int(); we have to keep
4183                  * ordering of set_bit() in netif_tx_stop_queue() and read of
4184                  * fp->bd_tx_cons */
4185                 smp_mb();
4186
4187                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4188                 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4189                         netif_tx_wake_queue(txq);
4190         }
4191         txdata->tx_pkt++;
4192
4193         return NETDEV_TX_OK;
4194 }
4195
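/* Retrieve the c2s (PCP) priority map used by bnx2x_setup_tc(). When
 * IS_MF_BD() is not set, an identity map and a default of 0 are returned;
 * otherwise the eight map entries and the default are unpacked, one byte per
 * entry, from the c2s_pcp_map_lower/upper/default shmem2 words, e.g.
 * c2s_map[0] = lower & 0xff, c2s_map[1] = (lower >> 8) & 0xff, and so on.
 */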
4196 void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4197 {
4198         int mfw_vn = BP_FW_MB_IDX(bp);
4199         u32 tmp;
4200
4201         /* If the shmem shouldn't affect the configuration, use a 1:1 mapping */
4202         if (!IS_MF_BD(bp)) {
4203                 int i;
4204
4205                 for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4206                         c2s_map[i] = i;
4207                 *c2s_default = 0;
4208
4209                 return;
4210         }
4211
4212         tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4213         tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4214         c2s_map[0] = tmp & 0xff;
4215         c2s_map[1] = (tmp >> 8) & 0xff;
4216         c2s_map[2] = (tmp >> 16) & 0xff;
4217         c2s_map[3] = (tmp >> 24) & 0xff;
4218
4219         tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4220         tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4221         c2s_map[4] = tmp & 0xff;
4222         c2s_map[5] = (tmp >> 8) & 0xff;
4223         c2s_map[6] = (tmp >> 16) & 0xff;
4224         c2s_map[7] = (tmp >> 24) & 0xff;
4225
4226         tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4227         tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4228         *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4229 }
4230
4231 /**
4232  * bnx2x_setup_tc - routine to configure net_device for multi tc
4233  *
4234  * @dev: net device to configure
4235  * @num_tc: number of traffic classes to enable
4236  *
4237  * callback connected to the ndo_setup_tc function pointer
4238  */
4239 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4240 {
4241         struct bnx2x *bp = netdev_priv(dev);
4242         u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4243         int cos, prio, count, offset;
4244
4245         /* setup tc must be called under rtnl lock */
4246         ASSERT_RTNL();
4247
4248         /* no traffic classes requested - reset the tc mapping and return */
4249         if (!num_tc) {
4250                 netdev_reset_tc(dev);
4251                 return 0;
4252         }
4253
4254         /* requested to support too many traffic classes */
4255         if (num_tc > bp->max_cos) {
4256                 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4257                           num_tc, bp->max_cos);
4258                 return -EINVAL;
4259         }
4260
4261         /* declare the number of supported traffic classes */
4262         if (netdev_set_num_tc(dev, num_tc)) {
4263                 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4264                 return -EINVAL;
4265         }
4266
4267         bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4268
4269         /* configure priority to traffic class mapping */
4270         for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4271                 int outer_prio = c2s_map[prio];
4272
4273                 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4274                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4275                    "mapping priority %d to tc %d\n",
4276                    outer_prio, bp->prio_to_cos[outer_prio]);
4277         }
4278
4279         /* Use this configuration to differentiate tc0 from other COSes.
4280            This can be used for ETS or PFC, and saves the effort of setting
4281            up a multi-class queueing discipline or negotiating DCBX with a switch:
4282         netdev_set_prio_tc_map(dev, 0, 0);
4283         DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4284         for (prio = 1; prio < 16; prio++) {
4285                 netdev_set_prio_tc_map(dev, prio, 1);
4286                 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4287         } */
4288
4289         /* configure traffic class to transmission queue mapping */
4290         for (cos = 0; cos < bp->max_cos; cos++) {
4291                 count = BNX2X_NUM_ETH_QUEUES(bp);
4292                 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4293                 netdev_set_tc_queue(dev, cos, count, offset);
4294                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4295                    "mapping tc %d to offset %d count %d\n",
4296                    cos, offset, count);
4297         }
4298
4299         return 0;
4300 }
4301
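/* ndo_setup_tc entry point: only TC_SETUP_QDISC_MQPRIO is supported. The
 * hw offload mode is forced to TC_MQPRIO_HW_OFFLOAD_TCS and the actual
 * configuration is done by bnx2x_setup_tc().
 */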
4302 int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
4303                      void *type_data)
4304 {
4305         struct tc_mqprio_qopt *mqprio = type_data;
4306
4307         if (type != TC_SETUP_QDISC_MQPRIO)
4308                 return -EOPNOTSUPP;
4309
4310         mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
4311
4312         return bnx2x_setup_tc(dev, mqprio->num_tc);
4313 }
4314
4315 /* called with rtnl_lock */
4316 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4317 {
4318         struct sockaddr *addr = p;
4319         struct bnx2x *bp = netdev_priv(dev);
4320         int rc = 0;
4321
4322         if (!is_valid_ether_addr(addr->sa_data)) {
4323                 BNX2X_ERR("Requested MAC address is not valid\n");
4324                 return -EINVAL;
4325         }
4326
4327         if (IS_MF_STORAGE_ONLY(bp)) {
4328                 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4329                 return -EINVAL;
4330         }
4331
4332         if (netif_running(dev))  {
4333                 rc = bnx2x_set_eth_mac(bp, false);
4334                 if (rc)
4335                         return rc;
4336         }
4337
4338         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4339
4340         if (netif_running(dev))
4341                 rc = bnx2x_set_eth_mac(bp, true);
4342
4343         if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4344                 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4345
4346         return rc;
4347 }
4348
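/* Release everything bnx2x_alloc_fp_mem_at() allocated for one fastpath:
 * the status block (DMA-allocated for non-FCoE queues), the Rx
 * buffer/descriptor/completion/SGE rings and the per-CoS Tx rings.
 */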
4349 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4350 {
4351         union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4352         struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4353         u8 cos;
4354
4355         /* Common */
4356
4357         if (IS_FCOE_IDX(fp_index)) {
4358                 memset(sb, 0, sizeof(union host_hc_status_block));
4359                 fp->status_blk_mapping = 0;
4360         } else {
4361                 /* status blocks */
4362                 if (!CHIP_IS_E1x(bp))
4363                         BNX2X_PCI_FREE(sb->e2_sb,
4364                                        bnx2x_fp(bp, fp_index,
4365                                                 status_blk_mapping),
4366                                        sizeof(struct host_hc_status_block_e2));
4367                 else
4368                         BNX2X_PCI_FREE(sb->e1x_sb,
4369                                        bnx2x_fp(bp, fp_index,
4370                                                 status_blk_mapping),
4371                                        sizeof(struct host_hc_status_block_e1x));
4372         }
4373
4374         /* Rx */
4375         if (!skip_rx_queue(bp, fp_index)) {
4376                 bnx2x_free_rx_bds(fp);
4377
4378                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4379                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4380                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4381                                bnx2x_fp(bp, fp_index, rx_desc_mapping),
4382                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
4383
4384                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4385                                bnx2x_fp(bp, fp_index, rx_comp_mapping),
4386                                sizeof(struct eth_fast_path_rx_cqe) *
4387                                NUM_RCQ_BD);
4388
4389                 /* SGE ring */
4390                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4391                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4392                                bnx2x_fp(bp, fp_index, rx_sge_mapping),
4393                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4394         }
4395
4396         /* Tx */
4397         if (!skip_tx_queue(bp, fp_index)) {
4398                 /* fastpath tx rings: tx_buf tx_desc */
4399                 for_each_cos_in_tx_queue(fp, cos) {
4400                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4401
4402                         DP(NETIF_MSG_IFDOWN,
4403                            "freeing tx memory of fp %d cos %d cid %d\n",
4404                            fp_index, cos, txdata->cid);
4405
4406                         BNX2X_FREE(txdata->tx_buf_ring);
4407                         BNX2X_PCI_FREE(txdata->tx_desc_ring,
4408                                 txdata->tx_desc_mapping,
4409                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4410                 }
4411         }
4412         /* end of fastpath */
4413 }
4414
4415 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4416 {
4417         int i;
4418         for_each_cnic_queue(bp, i)
4419                 bnx2x_free_fp_mem_at(bp, i);
4420 }
4421
4422 void bnx2x_free_fp_mem(struct bnx2x *bp)
4423 {
4424         int i;
4425         for_each_eth_queue(bp, i)
4426                 bnx2x_free_fp_mem_at(bp, i);
4427 }
4428
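/* Cache shortcut pointers to the index_values and running_index arrays of
 * the queue's status block, picking the E2 or E1x layout once here so later
 * code does not need to re-check the chip type.
 */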
4429 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4430 {
4431         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4432         if (!CHIP_IS_E1x(bp)) {
4433                 bnx2x_fp(bp, index, sb_index_values) =
4434                         (__le16 *)status_blk.e2_sb->sb.index_values;
4435                 bnx2x_fp(bp, index, sb_running_index) =
4436                         (__le16 *)status_blk.e2_sb->sb.running_index;
4437         } else {
4438                 bnx2x_fp(bp, index, sb_index_values) =
4439                         (__le16 *)status_blk.e1x_sb->sb.index_values;
4440                 bnx2x_fp(bp, index, sb_running_index) =
4441                         (__le16 *)status_blk.e1x_sb->sb.running_index;
4442         }
4443 }
4444
4445 /* Returns the number of actually allocated BDs */
4446 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4447                               int rx_ring_size)
4448 {
4449         struct bnx2x *bp = fp->bp;
4450         u16 ring_prod, cqe_ring_prod;
4451         int i, failure_cnt = 0;
4452
4453         fp->rx_comp_cons = 0;
4454         cqe_ring_prod = ring_prod = 0;
4455
4456         /* This routine is called only during init, so
4457          * fp->eth_q_stats.rx_skb_alloc_failed = 0
4458          */
4459         for (i = 0; i < rx_ring_size; i++) {
4460                 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4461                         failure_cnt++;
4462                         continue;
4463                 }
4464                 ring_prod = NEXT_RX_IDX(ring_prod);
4465                 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4466                 WARN_ON(ring_prod <= (i - failure_cnt));
4467         }
4468
4469         if (failure_cnt)
4470                 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4471                           i - failure_cnt, fp->index);
4472
4473         fp->rx_bd_prod = ring_prod;
4474         /* Limit the CQE producer by the CQE ring size */
4475         fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4476                                cqe_ring_prod);
4477
4478         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4479
4480         return i - failure_cnt;
4481 }
4482
4483 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4484 {
4485         int i;
4486
4487         for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4488                 struct eth_rx_cqe_next_page *nextpg;
4489
4490                 nextpg = (struct eth_rx_cqe_next_page *)
4491                         &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4492                 nextpg->addr_hi =
4493                         cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4494                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4495                 nextpg->addr_lo =
4496                         cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4497                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4498         }
4499 }
4500
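/* Allocate everything a single fastpath needs: the status block (skipped
 * for FCoE, which uses the default SB), the per-CoS Tx buffer/descriptor
 * rings and the Rx buffer/descriptor/completion/SGE rings, then seed the Rx
 * BD ring. The Rx ring size comes from bp->rx_ring_size when set, otherwise
 * it is derived from MAX_RX_AVAIL per Rx queue and clamped to the FW
 * minimum. If fewer BDs than the minimum could be allocated, the queue's
 * memory is released and -ENOMEM is returned so the caller can drop it.
 */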
4501 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4502 {
4503         union host_hc_status_block *sb;
4504         struct bnx2x_fastpath *fp = &bp->fp[index];
4505         int ring_size = 0;
4506         u8 cos;
4507         int rx_ring_size = 0;
4508
4509         if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4510                 rx_ring_size = MIN_RX_SIZE_NONTPA;
4511                 bp->rx_ring_size = rx_ring_size;
4512         } else if (!bp->rx_ring_size) {
4513                 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4514
4515                 if (CHIP_IS_E3(bp)) {
4516                         u32 cfg = SHMEM_RD(bp,
4517                                            dev_info.port_hw_config[BP_PORT(bp)].
4518                                            default_cfg);
4519
4520                         /* Decrease ring size for 1G functions */
4521                         if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4522                             PORT_HW_CFG_NET_SERDES_IF_SGMII)
4523                                 rx_ring_size /= 10;
4524                 }
4525
4526                 /* allocate at least the number of buffers required by FW */
4527                 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4528                                      MIN_RX_SIZE_TPA, rx_ring_size);
4529
4530                 bp->rx_ring_size = rx_ring_size;
4531         } else /* if rx_ring_size specified - use it */
4532                 rx_ring_size = bp->rx_ring_size;
4533
4534         DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4535
4536         /* Common */
4537         sb = &bnx2x_fp(bp, index, status_blk);
4538
4539         if (!IS_FCOE_IDX(index)) {
4540                 /* status blocks */
4541                 if (!CHIP_IS_E1x(bp)) {
4542                         sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4543                                                     sizeof(struct host_hc_status_block_e2));
4544                         if (!sb->e2_sb)
4545                                 goto alloc_mem_err;
4546                 } else {
4547                         sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4548                                                      sizeof(struct host_hc_status_block_e1x));
4549                         if (!sb->e1x_sb)
4550                                 goto alloc_mem_err;
4551                 }
4552         }
4553
4554         /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4555          * set shortcuts for it.
4556          */
4557         if (!IS_FCOE_IDX(index))
4558                 set_sb_shortcuts(bp, index);
4559
4560         /* Tx */
4561         if (!skip_tx_queue(bp, index)) {
4562                 /* fastpath tx rings: tx_buf tx_desc */
4563                 for_each_cos_in_tx_queue(fp, cos) {
4564                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4565
4566                         DP(NETIF_MSG_IFUP,
4567                            "allocating tx memory of fp %d cos %d\n",
4568                            index, cos);
4569
4570                         txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4571                                                       sizeof(struct sw_tx_bd),
4572                                                       GFP_KERNEL);
4573                         if (!txdata->tx_buf_ring)
4574                                 goto alloc_mem_err;
4575                         txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4576                                                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4577                         if (!txdata->tx_desc_ring)
4578                                 goto alloc_mem_err;
4579                 }
4580         }
4581
4582         /* Rx */
4583         if (!skip_rx_queue(bp, index)) {
4584                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4585                 bnx2x_fp(bp, index, rx_buf_ring) =
4586                         kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4587                 if (!bnx2x_fp(bp, index, rx_buf_ring))
4588                         goto alloc_mem_err;
4589                 bnx2x_fp(bp, index, rx_desc_ring) =
4590                         BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4591                                         sizeof(struct eth_rx_bd) * NUM_RX_BD);
4592                 if (!bnx2x_fp(bp, index, rx_desc_ring))
4593                         goto alloc_mem_err;
4594
4595                 /* Seed all CQEs by 1s */
4596                 bnx2x_fp(bp, index, rx_comp_ring) =
4597                         BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4598                                          sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4599                 if (!bnx2x_fp(bp, index, rx_comp_ring))
4600                         goto alloc_mem_err;
4601
4602                 /* SGE ring */
4603                 bnx2x_fp(bp, index, rx_page_ring) =
4604                         kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4605                                 GFP_KERNEL);
4606                 if (!bnx2x_fp(bp, index, rx_page_ring))
4607                         goto alloc_mem_err;
4608                 bnx2x_fp(bp, index, rx_sge_ring) =
4609                         BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4610                                         BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4611                 if (!bnx2x_fp(bp, index, rx_sge_ring))
4612                         goto alloc_mem_err;
4613                 /* RX BD ring */
4614                 bnx2x_set_next_page_rx_bd(fp);
4615
4616                 /* CQ ring */
4617                 bnx2x_set_next_page_rx_cq(fp);
4618
4619                 /* BDs */
4620                 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4621                 if (ring_size < rx_ring_size)
4622                         goto alloc_mem_err;
4623         }
4624
4625         return 0;
4626
4627 /* handles low memory cases */
4628 alloc_mem_err:
4629         BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4630                                                 index, ring_size);
4631         /* FW will drop all packets if the queue is not big enough.
4632          * In these cases we disable the queue.
4633          * Min size is different for OOO, TPA and non-TPA queues.
4634          */
4635         if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4636                                 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4637                         /* release memory allocated for this queue */
4638                         bnx2x_free_fp_mem_at(bp, index);
4639                         return -ENOMEM;
4640         }
4641         return 0;
4642 }
4643
4644 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4645 {
4646         if (!NO_FCOE(bp))
4647                 /* FCoE */
4648                 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4649                         /* we will fail the load process instead of
4650                          * marking NO_FCOE_FLAG
4651                          */
4652                         return -ENOMEM;
4653
4654         return 0;
4655 }
4656
4657 static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4658 {
4659         int i;
4660
4661         /* 1. Allocate FP for leading - fatal if error
4662          * 2. Allocate RSS - fix number of queues if error
4663          */
4664
4665         /* leading */
4666         if (bnx2x_alloc_fp_mem_at(bp, 0))
4667                 return -ENOMEM;
4668
4669         /* RSS */
4670         for_each_nondefault_eth_queue(bp, i)
4671                 if (bnx2x_alloc_fp_mem_at(bp, i))
4672                         break;
4673
4674         /* handle memory failures */
4675         if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4676                 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4677
4678                 WARN_ON(delta < 0);
4679                 bnx2x_shrink_eth_fp(bp, delta);
4680                 if (CNIC_SUPPORT(bp))
4681                         /* move non-eth FPs next to the last eth FP;
4682                          * must be done in this order:
4683                          * FCOE_IDX < FWD_IDX < OOO_IDX
4684                          */
4685
4686                         /* move the FCoE fp even if NO_FCOE_FLAG is on */
4687                         bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4688                 bp->num_ethernet_queues -= delta;
4689                 bp->num_queues = bp->num_ethernet_queues +
4690                                  bp->num_cnic_queues;
4691                 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4692                           bp->num_queues + delta, bp->num_queues);
4693         }
4694
4695         return 0;
4696 }
4697
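/* Free the bookkeeping arrays allocated by bnx2x_alloc_mem_bp(): per-queue
 * TPA aggregation info, the fastpath, sp_objs, fp_stats and Tx queue arrays,
 * the MSI-X table and the ILT.
 */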
4698 void bnx2x_free_mem_bp(struct bnx2x *bp)
4699 {
4700         int i;
4701
4702         for (i = 0; i < bp->fp_array_size; i++)
4703                 kfree(bp->fp[i].tpa_info);
4704         kfree(bp->fp);
4705         kfree(bp->sp_objs);
4706         kfree(bp->fp_stats);
4707         kfree(bp->bnx2x_txq);
4708         kfree(bp->msix_table);
4709         kfree(bp->ilt);
4710 }
4711
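/* Allocate the driver bookkeeping arrays sized from probe-time parameters
 * (max RSS count, CNIC support, IGU SB count): the fastpath array with its
 * per-queue TPA info, sp_objs, fp_stats, the Tx queue array, the MSI-X
 * table and the ILT. Any failure unwinds through bnx2x_free_mem_bp().
 */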
4712 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4713 {
4714         struct bnx2x_fastpath *fp;
4715         struct msix_entry *tbl;
4716         struct bnx2x_ilt *ilt;
4717         int msix_table_size = 0;
4718         int fp_array_size, txq_array_size;
4719         int i;
4720
4721         /*
4722          * The biggest MSI-X table we might need is the maximum number of fast
4723          * path IGU SBs plus the default SB (for PF only).
4724          */
4725         msix_table_size = bp->igu_sb_cnt;
4726         if (IS_PF(bp))
4727                 msix_table_size++;
4728         BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4729
4730         /* fp array: RSS plus CNIC related L2 queues */
4731         fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4732         bp->fp_array_size = fp_array_size;
4733         BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4734
4735         fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4736         if (!fp)
4737                 goto alloc_err;
4738         for (i = 0; i < bp->fp_array_size; i++) {
4739                 fp[i].tpa_info =
4740                         kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4741                                 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4742                 if (!(fp[i].tpa_info))
4743                         goto alloc_err;
4744         }
4745
4746         bp->fp = fp;
4747
4748         /* allocate sp objs */
4749         bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4750                               GFP_KERNEL);
4751         if (!bp->sp_objs)
4752                 goto alloc_err;
4753
4754         /* allocate fp_stats */
4755         bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4756                                GFP_KERNEL);
4757         if (!bp->fp_stats)
4758                 goto alloc_err;
4759
4760         /* Allocate memory for the transmission queues array */
4761         txq_array_size =
4762                 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4763         BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
4764
4765         bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4766                                 GFP_KERNEL);
4767         if (!bp->bnx2x_txq)
4768                 goto alloc_err;
4769
4770         /* msix table */
4771         tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4772         if (!tbl)
4773                 goto alloc_err;
4774         bp->msix_table = tbl;
4775
4776         /* ilt */
4777         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4778         if (!ilt)
4779                 goto alloc_err;
4780         bp->ilt = ilt;
4781
4782         return 0;
4783 alloc_err:
4784         bnx2x_free_mem_bp(bp);
4785         return -ENOMEM;
4786 }
4787
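/* Unload and immediately reload the NIC if the interface is up; used after
 * configuration changes (e.g. MTU or feature flags) that need a full
 * re-initialization. Callers are expected to hold the rtnl lock.
 */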
4788 int bnx2x_reload_if_running(struct net_device *dev)
4789 {
4790         struct bnx2x *bp = netdev_priv(dev);
4791
4792         if (unlikely(!netif_running(dev)))
4793                 return 0;
4794
4795         bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4796         return bnx2x_nic_load(bp, LOAD_NORMAL);
4797 }
4798
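/* Return the currently active PHY index: INT_PHY when only one PHY exists,
 * otherwise EXT_PHY1 or EXT_PHY2 based on the live link status, or on the
 * configured PHY selection when the link is down.
 */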
4799 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4800 {
4801         u32 sel_phy_idx = 0;
4802         if (bp->link_params.num_phys <= 1)
4803                 return INT_PHY;
4804
4805         if (bp->link_vars.link_up) {
4806                 sel_phy_idx = EXT_PHY1;
4807                 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4808                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4809                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4810                         sel_phy_idx = EXT_PHY2;
4811         } else {
4812
4813                 switch (bnx2x_phy_selection(&bp->link_params)) {
4814                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4815                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4816                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4817                        sel_phy_idx = EXT_PHY1;
4818                        break;
4819                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4820                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4821                        sel_phy_idx = EXT_PHY2;
4822                        break;
4823                 }
4824         }
4825
4826         return sel_phy_idx;
4827 }
4828 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4829 {
4830         u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4831         /*
4832          * The selected activated PHY is always after swapping (in case PHY
4833          * swapping is enabled). So when swapping is enabled, we need to reverse
4834          * the configuration
4835          */
4836
4837         if (bp->link_params.multi_phy_config &
4838             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4839                 if (sel_phy_idx == EXT_PHY1)
4840                         sel_phy_idx = EXT_PHY2;
4841                 else if (sel_phy_idx == EXT_PHY2)
4842                         sel_phy_idx = EXT_PHY1;
4843         }
4844         return LINK_CONFIG_IDX(sel_phy_idx);
4845 }
4846
4847 #ifdef NETDEV_FCOE_WWNN
4848 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4849 {
4850         struct bnx2x *bp = netdev_priv(dev);
4851         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4852
4853         switch (type) {
4854         case NETDEV_FCOE_WWNN:
4855                 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4856                                 cp->fcoe_wwn_node_name_lo);
4857                 break;
4858         case NETDEV_FCOE_WWPN:
4859                 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4860                                 cp->fcoe_wwn_port_name_lo);
4861                 break;
4862         default:
4863                 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4864                 return -EINVAL;
4865         }
4866
4867         return 0;
4868 }
4869 #endif
4870
4871 /* called with rtnl_lock */
4872 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4873 {
4874         struct bnx2x *bp = netdev_priv(dev);
4875
4876         if (pci_num_vf(bp->pdev)) {
4877                 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4878                 return -EPERM;
4879         }
4880
4881         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4882                 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4883                 return -EAGAIN;
4884         }
4885
4886         /* This does not race with packet allocation
4887          * because the actual alloc size is
4888          * only updated as part of load
4889          */
4890         dev->mtu = new_mtu;
4891
4892         if (!bnx2x_mtu_allows_gro(new_mtu))
4893                 dev->features &= ~NETIF_F_GRO_HW;
4894
4895         if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4896                 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4897
4898         return bnx2x_reload_if_running(dev);
4899 }
4900
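/* ndo_fix_features callback. When VFs are enabled, revert feature changes
 * that would force an internal PF reload (RXCSUM, LOOPBACK). Also enforce
 * the dependencies: LRO needs Rx checksumming, GRO_HW needs GRO and an MTU
 * that allows it, and GRO_HW and LRO are mutually exclusive.
 */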
4901 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4902                                      netdev_features_t features)
4903 {
4904         struct bnx2x *bp = netdev_priv(dev);
4905
4906         if (pci_num_vf(bp->pdev)) {
4907                 netdev_features_t changed = dev->features ^ features;
4908
4909                 /* Revert the requested changes in features if they
4910                  * would require an internal reload of the PF in bnx2x_set_features().
4911                  */
4912                 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4913                         features &= ~NETIF_F_RXCSUM;
4914                         features |= dev->features & NETIF_F_RXCSUM;
4915                 }
4916
4917                 if (changed & NETIF_F_LOOPBACK) {
4918                         features &= ~NETIF_F_LOOPBACK;
4919                         features |= dev->features & NETIF_F_LOOPBACK;
4920                 }
4921         }
4922
4923         /* TPA requires Rx CSUM offloading */
4924         if (!(features & NETIF_F_RXCSUM))
4925                 features &= ~NETIF_F_LRO;
4926
4927         if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
4928                 features &= ~NETIF_F_GRO_HW;
4929         if (features & NETIF_F_GRO_HW)
4930                 features &= ~NETIF_F_LRO;
4931
4932         return features;
4933 }
4934
4935 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4936 {
4937         struct bnx2x *bp = netdev_priv(dev);
4938         netdev_features_t changes = features ^ dev->features;
4939         bool bnx2x_reload = false;
4940         int rc;
4941
4942         /* VFs or non-SRIOV PFs should be able to change the loopback feature */
4943         if (!pci_num_vf(bp->pdev)) {
4944                 if (features & NETIF_F_LOOPBACK) {
4945                         if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4946                                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4947                                 bnx2x_reload = true;
4948                         }
4949                 } else {
4950                         if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4951                                 bp->link_params.loopback_mode = LOOPBACK_NONE;
4952                                 bnx2x_reload = true;
4953                         }
4954                 }
4955         }
4956
4957         /* Don't care about GRO changes */
4958         changes &= ~NETIF_F_GRO;
4959
4960         if (changes)
4961                 bnx2x_reload = true;
4962
4963         if (bnx2x_reload) {
4964                 if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4965                         dev->features = features;
4966                         rc = bnx2x_reload_if_running(dev);
4967                         return rc ? rc : 1;
4968                 }
4969                 /* else: bnx2x_nic_load() will be called at end of recovery */
4970         }
4971
4972         return 0;
4973 }
4974
4975 void bnx2x_tx_timeout(struct net_device *dev)
4976 {
4977         struct bnx2x *bp = netdev_priv(dev);
4978
4979         /* We want the information of the dump logged,
4980          * but calling bnx2x_panic() would kill all chances of recovery.
4981          */
4982         if (!bp->panic)
4983 #ifndef BNX2X_STOP_ON_ERROR
4984                 bnx2x_panic_dump(bp, false);
4985 #else
4986                 bnx2x_panic();
4987 #endif
4988
4989         /* This allows the netif to be shut down gracefully before resetting */
4990         bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4991 }
4992
4993 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4994 {
4995         struct net_device *dev = pci_get_drvdata(pdev);
4996         struct bnx2x *bp;
4997
4998         if (!dev) {
4999                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5000                 return -ENODEV;
5001         }
5002         bp = netdev_priv(dev);
5003
5004         rtnl_lock();
5005
5006         pci_save_state(pdev);
5007
5008         if (!netif_running(dev)) {
5009                 rtnl_unlock();
5010                 return 0;
5011         }
5012
5013         netif_device_detach(dev);
5014
5015         bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
5016
5017         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
5018
5019         rtnl_unlock();
5020
5021         return 0;
5022 }
5023
5024 int bnx2x_resume(struct pci_dev *pdev)
5025 {
5026         struct net_device *dev = pci_get_drvdata(pdev);
5027         struct bnx2x *bp;
5028         int rc;
5029
5030         if (!dev) {
5031                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5032                 return -ENODEV;
5033         }
5034         bp = netdev_priv(dev);
5035
5036         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
5037                 BNX2X_ERR("Handling parity error recovery. Try again later\n");
5038                 return -EAGAIN;
5039         }
5040
5041         rtnl_lock();
5042
5043         pci_restore_state(pdev);
5044
5045         if (!netif_running(dev)) {
5046                 rtnl_unlock();
5047                 return 0;
5048         }
5049
5050         bnx2x_set_power_state(bp, PCI_D0);
5051         netif_device_attach(dev);
5052
5053         rc = bnx2x_nic_load(bp, LOAD_OPEN);
5054
5055         rtnl_unlock();
5056
5057         return rc;
5058 }
5059
5060 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5061                               u32 cid)
5062 {
5063         if (!cxt) {
5064                 BNX2X_ERR("bad context pointer %p\n", cxt);
5065                 return;
5066         }
5067
5068         /* ustorm cxt validation */
5069         cxt->ustorm_ag_context.cdu_usage =
5070                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5071                         CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5072         /* xstorm cxt validation */
5073         cxt->xstorm_ag_context.cdu_reserved =
5074                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5075                         CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5076 }
5077
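/* Program the host-coalescing timeout (in ticks) of one status block index
 * by writing it into CSTORM internal memory.
 */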
5078 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5079                                     u8 fw_sb_id, u8 sb_index,
5080                                     u8 ticks)
5081 {
5082         u32 addr = BAR_CSTRORM_INTMEM +
5083                    CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5084         REG_WR8(bp, addr, ticks);
5085         DP(NETIF_MSG_IFUP,
5086            "port %x fw_sb_id %d sb_index %d ticks %d\n",
5087            port, fw_sb_id, sb_index, ticks);
5088 }
5089
5090 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5091                                     u16 fw_sb_id, u8 sb_index,
5092                                     u8 disable)
5093 {
5094         u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5095         u32 addr = BAR_CSTRORM_INTMEM +
5096                    CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
5097         u8 flags = REG_RD8(bp, addr);
5098         /* clear and set */
5099         flags &= ~HC_INDEX_DATA_HC_ENABLED;
5100         flags |= enable_flag;
5101         REG_WR8(bp, addr, flags);
5102         DP(NETIF_MSG_IFUP,
5103            "port %x fw_sb_id %d sb_index %d disable %d\n",
5104            port, fw_sb_id, sb_index, disable);
5105 }
5106
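/* Update interrupt coalescing for one status block index: convert the
 * requested interval from microseconds to HC ticks (usec / BNX2X_BTR),
 * program the timeout, then enable or disable coalescing for that index
 * (a zero interval also disables it).
 */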
5107 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5108                                     u8 sb_index, u8 disable, u16 usec)
5109 {
5110         int port = BP_PORT(bp);
5111         u8 ticks = usec / BNX2X_BTR;
5112
5113         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5114
5115         disable = disable ? 1 : (usec ? 0 : 1);
5116         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5117 }
5118
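/* Set @flag in bp->sp_rtnl_state, with the memory barriers required around
 * the atomic set_bit(), and kick the sp_rtnl delayed work so the request is
 * picked up by the slowpath rtnl task.
 */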
5119 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5120                             u32 verbose)
5121 {
5122         smp_mb__before_atomic();
5123         set_bit(flag, &bp->sp_rtnl_state);
5124         smp_mb__after_atomic();
5125         DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5126            flag);
5127         schedule_delayed_work(&bp->sp_rtnl_task, 0);
5128 }