1 /* bnx2x_cmn.c: QLogic Everest network driver.
3 * Copyright (c) 2007-2013 Broadcom Corporation
4 * Copyright (c) 2014 QLogic Corporation
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
11 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12 * Written by: Eliezer Tamir
13 * Based on code from Michael Chan's bnx2 driver
14 * UDP CSUM errata workaround by Arik Gendelman
15 * Slowpath and fastpath rework by Vladislav Zolotarov
16 * Statistics and Link management by Yitchak Gertner
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 #include <linux/etherdevice.h>
23 #include <linux/if_vlan.h>
24 #include <linux/interrupt.h>
26 #include <linux/crash_dump.h>
29 #include <net/ip6_checksum.h>
30 #include <net/busy_poll.h>
31 #include <linux/prefetch.h>
32 #include "bnx2x_cmn.h"
33 #include "bnx2x_init.h"
36 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
37 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
38 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
39 static int bnx2x_poll(struct napi_struct *napi, int budget);
41 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
45 /* Add NAPI objects */
46 for_each_rx_queue_cnic(bp, i) {
47 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
48 bnx2x_poll, NAPI_POLL_WEIGHT);
49 napi_hash_add(&bnx2x_fp(bp, i, napi));
53 static void bnx2x_add_all_napi(struct bnx2x *bp)
57 /* Add NAPI objects */
58 for_each_eth_queue(bp, i) {
59 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
60 bnx2x_poll, NAPI_POLL_WEIGHT);
61 napi_hash_add(&bnx2x_fp(bp, i, napi));
65 static int bnx2x_calc_num_queues(struct bnx2x *bp)
67 int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
69 /* Reduce memory usage in kdump environment by using only one queue */
70 if (is_kdump_kernel())
73 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
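/* Illustrative example (assumed values, not from the original source):
 * with the bnx2x_num_queues module parameter left at 0 and
 * netif_get_num_default_rss_queues() returning 8, nq starts at 8; a kdump
 * kernel forces nq down to 1, and clamp() finally bounds the result to the
 * [1, BNX2X_MAX_QUEUES(bp)] range supported by this adapter.
 */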
78 * bnx2x_move_fp - move content of the fastpath structure.
81 * @from: source FP index
82 * @to: destination FP index
84 * Makes sure the contents of the bp->fp[to].napi are kept
85 * intact. This is done by first copying the napi struct from
86 * the target to the source, and then mem-copying the entire
87 * source onto the target. Update txdata pointers and related content.
90 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
92 struct bnx2x_fastpath *from_fp = &bp->fp[from];
93 struct bnx2x_fastpath *to_fp = &bp->fp[to];
94 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
95 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
96 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
97 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
98 int old_max_eth_txqs, new_max_eth_txqs;
99 int old_txdata_index = 0, new_txdata_index = 0;
100 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
102 /* Copy the NAPI object as it has been already initialized */
103 from_fp->napi = to_fp->napi;
105 /* Move bnx2x_fastpath contents */
106 memcpy(to_fp, from_fp, sizeof(*to_fp));
109 /* Retain the tpa_info of the original `to' version as we don't want
110 * 2 FPs to contain the same tpa_info pointer.
112 to_fp->tpa_info = old_tpa_info;
114 /* move sp_objs contents as well, as their indices match fp ones */
115 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
117 /* move fp_stats contents as well, as their indices match fp ones */
118 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
120 /* Update txdata pointers in fp and move txdata content accordingly:
121 * Each fp consumes 'max_cos' txdata structures, so the index should be
122 * decremented by max_cos x delta.
125 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
126 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
128 if (from == FCOE_IDX(bp)) {
129 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
130 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
133 memcpy(&bp->bnx2x_txq[new_txdata_index],
134 &bp->bnx2x_txq[old_txdata_index],
135 sizeof(struct bnx2x_fp_txdata));
136 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
140 * bnx2x_fill_fw_str - Fill buffer with FW version string.
143 * @buf: character buffer to fill with the fw name
144 * @buf_len: length of the above buffer
147 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
150 u8 phy_fw_ver[PHY_FW_VER_LEN];
152 phy_fw_ver[0] = '\0';
153 bnx2x_get_ext_phy_fw_version(&bp->link_params,
154 phy_fw_ver, PHY_FW_VER_LEN);
155 strlcpy(buf, bp->fw_ver, buf_len);
156 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
158 (bp->common.bc_ver & 0xff0000) >> 16,
159 (bp->common.bc_ver & 0xff00) >> 8,
160 (bp->common.bc_ver & 0xff),
161 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
163 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
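/* Illustrative output (assumed version numbers, not from the original
 * source): for a PF the resulting string looks roughly like
 * "7.13.21.0 bc 7.13.11", with " phy <version>" appended only when an
 * external PHY firmware version string was actually retrieved.
 */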
168 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
171 * @delta: number of eth queues which were not allocated
173 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
175 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
177 /* The queue pointer cannot be reset on a per-fp basis, as moving the pointer
178 * backward along the array could cause memory to be overwritten
180 for (cos = 1; cos < bp->max_cos; cos++) {
181 for (i = 0; i < old_eth_num - delta; i++) {
182 struct bnx2x_fastpath *fp = &bp->fp[i];
183 int new_idx = cos * (old_eth_num - delta) + i;
185 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
186 sizeof(struct bnx2x_fp_txdata));
187 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
192 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
194 /* free skb in the packet ring at pos idx
195 * return idx of last bd freed
197 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
198 u16 idx, unsigned int *pkts_compl,
199 unsigned int *bytes_compl)
201 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
202 struct eth_tx_start_bd *tx_start_bd;
203 struct eth_tx_bd *tx_data_bd;
204 struct sk_buff *skb = tx_buf->skb;
205 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
207 u16 split_bd_len = 0;
209 /* prefetch skb end pointer to speed up dev_kfree_skb() */
212 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
213 txdata->txq_index, idx, tx_buf, skb);
215 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
217 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
218 #ifdef BNX2X_STOP_ON_ERROR
219 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
220 BNX2X_ERR("BAD nbd!\n");
224 new_cons = nbd + tx_buf->first_bd;
226 /* Get the next bd */
227 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
229 /* Skip a parse bd... */
231 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
233 if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
234 /* Skip second parse bd... */
236 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
239 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
240 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
241 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
242 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
244 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
248 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
249 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
255 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
256 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
257 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
259 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
266 (*bytes_compl) += skb->len;
267 dev_kfree_skb_any(skb);
270 tx_buf->first_bd = 0;
276 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
278 struct netdev_queue *txq;
279 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
280 unsigned int pkts_compl = 0, bytes_compl = 0;
282 #ifdef BNX2X_STOP_ON_ERROR
283 if (unlikely(bp->panic))
287 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
288 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
289 sw_cons = txdata->tx_pkt_cons;
291 while (sw_cons != hw_cons) {
294 pkt_cons = TX_BD(sw_cons);
296 DP(NETIF_MSG_TX_DONE,
297 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
298 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
300 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
301 &pkts_compl, &bytes_compl);
306 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
308 txdata->tx_pkt_cons = sw_cons;
309 txdata->tx_bd_cons = bd_cons;
311 /* Need to make the tx_bd_cons update visible to start_xmit()
312 * before checking for netif_tx_queue_stopped(). Without the
313 * memory barrier, there is a small possibility that
314 * start_xmit() will miss it and cause the queue to be stopped
316 * On the other hand we need an rmb() here to ensure the proper
317 * ordering of bit testing in the following
318 * netif_tx_queue_stopped(txq) call.
322 if (unlikely(netif_tx_queue_stopped(txq))) {
323 /* Taking tx_lock() is needed to prevent re-enabling the queue
324 * while it's empty. This could have happened if rx_action() gets
325 * suspended in bnx2x_tx_int() after the condition before
326 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
328 * stops the queue->sees fresh tx_bd_cons->releases the queue->
329 * sends some packets consuming the whole queue again->
333 __netif_tx_lock(txq, smp_processor_id());
335 if ((netif_tx_queue_stopped(txq)) &&
336 (bp->state == BNX2X_STATE_OPEN) &&
337 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
338 netif_tx_wake_queue(txq);
340 __netif_tx_unlock(txq);
345 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
348 u16 last_max = fp->last_max_sge;
350 if (SUB_S16(idx, last_max) > 0)
351 fp->last_max_sge = idx;
354 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
356 struct eth_end_agg_rx_cqe *cqe)
358 struct bnx2x *bp = fp->bp;
359 u16 last_max, last_elem, first_elem;
366 /* First mark all used pages */
367 for (i = 0; i < sge_len; i++)
368 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
369 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
371 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
372 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
374 /* Here we assume that the last SGE index is the biggest */
375 prefetch((void *)(fp->sge_mask));
376 bnx2x_update_last_max_sge(fp,
377 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
379 last_max = RX_SGE(fp->last_max_sge);
380 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
381 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
383 /* If ring is not full */
384 if (last_elem + 1 != first_elem)
387 /* Now update the prod */
388 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
389 if (likely(fp->sge_mask[i]))
392 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
393 delta += BIT_VEC64_ELEM_SZ;
397 fp->rx_sge_prod += delta;
398 /* clear page-end entries */
399 bnx2x_clear_sge_mask_next_elems(fp);
402 DP(NETIF_MSG_RX_STATUS,
403 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
404 fp->last_max_sge, fp->rx_sge_prod);
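/* Illustrative note (not from the original source): sge_mask is a bit vector
 * of 64-bit elements in which every bit tracks one SGE. The loop above walks
 * the elements from the current producer towards last_max_sge, stopping at
 * the first element that still has unconsumed SGEs; each fully consumed
 * element is re-armed to BIT_VEC64_ELEM_ONE_MASK and contributes a whole
 * element's worth of entries to the rx_sge_prod advance (delta).
 */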
407 /* Get Toeplitz hash value in the skb using the value from the
408 * CQE (calculated by HW).
410 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
411 const struct eth_fast_path_rx_cqe *cqe,
412 enum pkt_hash_types *rxhash_type)
414 /* Get Toeplitz hash from CQE */
415 if ((bp->dev->features & NETIF_F_RXHASH) &&
416 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
417 enum eth_rss_hash_type htype;
419 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
420 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
421 (htype == TCP_IPV6_HASH_TYPE)) ?
422 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
424 return le32_to_cpu(cqe->rss_hash_result);
426 *rxhash_type = PKT_HASH_TYPE_NONE;
430 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
432 struct eth_fast_path_rx_cqe *cqe)
434 struct bnx2x *bp = fp->bp;
435 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
436 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
437 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
439 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
440 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
442 /* print error if current state != stop */
443 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
444 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
446 /* Try to map an empty data buffer from the aggregation info */
447 mapping = dma_map_single(&bp->pdev->dev,
448 first_buf->data + NET_SKB_PAD,
449 fp->rx_buf_size, DMA_FROM_DEVICE);
451 * ...if it fails - move the skb from the consumer to the producer
452 * and set the current aggregation state as ERROR to drop it
453 * when TPA_STOP arrives.
456 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
457 /* Move the BD from the consumer to the producer */
458 bnx2x_reuse_rx_data(fp, cons, prod);
459 tpa_info->tpa_state = BNX2X_TPA_ERROR;
463 /* move empty data from pool to prod */
464 prod_rx_buf->data = first_buf->data;
465 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
466 /* point prod_bd to new data */
467 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
468 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
470 /* move partial skb from cons to pool (don't unmap yet) */
471 *first_buf = *cons_rx_buf;
473 /* mark bin state as START */
474 tpa_info->parsing_flags =
475 le16_to_cpu(cqe->pars_flags.flags);
476 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
477 tpa_info->tpa_state = BNX2X_TPA_START;
478 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
479 tpa_info->placement_offset = cqe->placement_offset;
480 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
481 if (fp->mode == TPA_MODE_GRO) {
482 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
483 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
484 tpa_info->gro_size = gro_size;
487 #ifdef BNX2X_STOP_ON_ERROR
488 fp->tpa_queue_used |= (1 << queue);
489 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
494 /* Timestamp option length allowed for TPA aggregation:
496 * nop nop kind length echo val
498 #define TPA_TSTAMP_OPT_LEN 12
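/* Illustrative breakdown (not from the original source): the 12 bytes are
 * 1 (NOP) + 1 (NOP) + 1 (kind) + 1 (length) + 4 (TS value) + 4 (TS echo
 * reply), i.e. the RFC 7323 timestamp option padded with two NOPs.
 */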
500 * bnx2x_set_gro_params - compute GRO values
503 * @parsing_flags: parsing flags from the START CQE
504 * @len_on_bd: total length of the first packet for the aggregation
506 * @pkt_len: length of all segments
508 * Approximate value of the MSS for this aggregation, calculated using
509 * its first packet.
510 * Compute the number of aggregated segments and the gso_type.
512 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
513 u16 len_on_bd, unsigned int pkt_len,
514 u16 num_of_coalesced_segs)
516 /* TPA aggregation won't have either IP options or TCP options
517 * other than timestamp or IPv6 extension headers.
519 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
521 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
522 PRS_FLAG_OVERETH_IPV6) {
523 hdrs_len += sizeof(struct ipv6hdr);
524 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
526 hdrs_len += sizeof(struct iphdr);
527 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
530 /* Check if there was a TCP timestamp; if there was, it will
531 * always be 12 bytes long: nop nop kind length echo val.
533 * Otherwise FW would close the aggregation.
535 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
536 hdrs_len += TPA_TSTAMP_OPT_LEN;
538 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
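/* Worked example (illustrative, assumed values): an untagged IPv4 TCP
 * aggregation with timestamps gives hdrs_len = 14 (ETH) + 20 (IP) + 20 (TCP)
 * + 12 (timestamp option) = 66, so a first packet with len_on_bd = 1514
 * yields gso_size = 1514 - 66 = 1448.
 */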
540 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
541 * to skb_shinfo(skb)->gso_segs
543 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
546 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
547 u16 index, gfp_t gfp_mask)
549 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
550 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
551 struct bnx2x_alloc_pool *pool = &fp->page_pool;
554 if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
556 /* put page reference used by the memory pool, since we
557 * won't be using this page as the mempool anymore.
560 put_page(pool->page);
562 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
563 if (unlikely(!pool->page)) {
564 BNX2X_ERR("Can't alloc sge\n");
571 mapping = dma_map_page(&bp->pdev->dev, pool->page,
572 pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
573 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
574 BNX2X_ERR("Can't map sge\n");
578 get_page(pool->page);
579 sw_buf->page = pool->page;
580 sw_buf->offset = pool->offset;
582 dma_unmap_addr_set(sw_buf, mapping, mapping);
584 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
585 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
587 pool->offset += SGE_PAGE_SIZE;
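/* Illustrative note (not from the original source): the pool page is carved
 * into SGE_PAGE_SIZE-sized slices by advancing pool->offset on every call;
 * a fresh pool page is allocated (and the old reference dropped) only when
 * the remaining space is smaller than one slice, so several SGEs can share a
 * single page allocation.
 */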
592 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
593 struct bnx2x_agg_info *tpa_info,
596 struct eth_end_agg_rx_cqe *cqe,
599 struct sw_rx_page *rx_pg, old_rx_pg;
600 u32 i, frag_len, frag_size;
601 int err, j, frag_id = 0;
602 u16 len_on_bd = tpa_info->len_on_bd;
603 u16 full_page = 0, gro_size = 0;
605 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
607 if (fp->mode == TPA_MODE_GRO) {
608 gro_size = tpa_info->gro_size;
609 full_page = tpa_info->full_page;
612 /* This is needed in order to enable forwarding support */
614 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
615 le16_to_cpu(cqe->pkt_len),
616 le16_to_cpu(cqe->num_of_coalesced_segs));
618 #ifdef BNX2X_STOP_ON_ERROR
619 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
620 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
622 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
628 /* Run through the SGL and compose the fragmented skb */
629 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
630 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
632 /* FW gives the indices of the SGE as if the ring is an array
633 (meaning that a "next" element will consume 2 indices) */
634 if (fp->mode == TPA_MODE_GRO)
635 frag_len = min_t(u32, frag_size, (u32)full_page);
637 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
639 rx_pg = &fp->rx_page_ring[sge_idx];
642 /* If we fail to allocate a substitute page, we simply stop
643 where we are and drop the whole packet */
644 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
646 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
650 dma_unmap_page(&bp->pdev->dev,
651 dma_unmap_addr(&old_rx_pg, mapping),
652 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
653 /* Add one frag and update the appropriate fields in the skb */
654 if (fp->mode == TPA_MODE_LRO)
655 skb_fill_page_desc(skb, j, old_rx_pg.page,
656 old_rx_pg.offset, frag_len);
660 for (rem = frag_len; rem > 0; rem -= gro_size) {
661 int len = rem > gro_size ? gro_size : rem;
662 skb_fill_page_desc(skb, frag_id++,
664 old_rx_pg.offset + offset,
667 get_page(old_rx_pg.page);
672 skb->data_len += frag_len;
673 skb->truesize += SGE_PAGES;
674 skb->len += frag_len;
676 frag_size -= frag_len;
682 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
684 if (fp->rx_frag_size)
690 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
692 if (fp->rx_frag_size) {
693 /* GFP_KERNEL allocations are used only during initialization */
694 if (unlikely(gfpflags_allow_blocking(gfp_mask)))
695 return (void *)__get_free_page(gfp_mask);
697 return netdev_alloc_frag(fp->rx_frag_size);
700 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
704 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
706 const struct iphdr *iph = ip_hdr(skb);
709 skb_set_transport_header(skb, sizeof(struct iphdr));
712 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
713 iph->saddr, iph->daddr, 0);
716 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
718 struct ipv6hdr *iph = ipv6_hdr(skb);
721 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
724 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
725 &iph->saddr, &iph->daddr, 0);
728 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
729 void (*gro_func)(struct bnx2x*, struct sk_buff*))
731 skb_set_network_header(skb, 0);
733 tcp_gro_complete(skb);
737 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
741 if (skb_shinfo(skb)->gso_size) {
742 switch (be16_to_cpu(skb->protocol)) {
744 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
747 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
750 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
751 be16_to_cpu(skb->protocol));
755 skb_record_rx_queue(skb, fp->rx_queue);
756 napi_gro_receive(&fp->napi, skb);
759 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
760 struct bnx2x_agg_info *tpa_info,
762 struct eth_end_agg_rx_cqe *cqe,
765 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
766 u8 pad = tpa_info->placement_offset;
767 u16 len = tpa_info->len_on_bd;
768 struct sk_buff *skb = NULL;
769 u8 *new_data, *data = rx_buf->data;
770 u8 old_tpa_state = tpa_info->tpa_state;
772 tpa_info->tpa_state = BNX2X_TPA_STOP;
774 /* If there was an error during the handling of the TPA_START -
775 * drop this aggregation.
777 if (old_tpa_state == BNX2X_TPA_ERROR)
780 /* Try to allocate the new data */
781 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
782 /* Unmap skb in the pool anyway, as we are going to change
783 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails.
785 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
786 fp->rx_buf_size, DMA_FROM_DEVICE);
787 if (likely(new_data))
788 skb = build_skb(data, fp->rx_frag_size);
791 #ifdef BNX2X_STOP_ON_ERROR
792 if (pad + len > fp->rx_buf_size) {
793 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
794 pad, len, fp->rx_buf_size);
800 skb_reserve(skb, pad + NET_SKB_PAD);
802 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
804 skb->protocol = eth_type_trans(skb, bp->dev);
805 skb->ip_summed = CHECKSUM_UNNECESSARY;
807 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
808 skb, cqe, cqe_idx)) {
809 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
810 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
811 bnx2x_gro_receive(bp, fp, skb);
813 DP(NETIF_MSG_RX_STATUS,
814 "Failed to allocate new pages - dropping packet!\n");
815 dev_kfree_skb_any(skb);
818 /* put new data in bin */
819 rx_buf->data = new_data;
824 bnx2x_frag_free(fp, new_data);
826 /* drop the packet and keep the buffer in the bin */
827 DP(NETIF_MSG_RX_STATUS,
828 "Failed to allocate or map a new skb - dropping packet!\n");
829 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
832 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
833 u16 index, gfp_t gfp_mask)
836 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
837 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
840 data = bnx2x_frag_alloc(fp, gfp_mask);
841 if (unlikely(data == NULL))
844 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
847 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
848 bnx2x_frag_free(fp, data);
849 BNX2X_ERR("Can't map rx data\n");
854 dma_unmap_addr_set(rx_buf, mapping, mapping);
856 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
857 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
863 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
864 struct bnx2x_fastpath *fp,
865 struct bnx2x_eth_q_stats *qstats)
867 /* Do nothing if no L4 csum validation was done.
868 * We do not check whether IP csum was validated. For IPv4 we assume
869 * that if the card got as far as validating the L4 csum, it also
870 * validated the IP csum. IPv6 has no IP csum.
872 if (cqe->fast_path_cqe.status_flags &
873 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
876 /* If L4 validation was done, check if an error was found. */
878 if (cqe->fast_path_cqe.type_error_flags &
879 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
880 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
881 qstats->hw_csum_err++;
883 skb->ip_summed = CHECKSUM_UNNECESSARY;
886 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
888 struct bnx2x *bp = fp->bp;
889 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
890 u16 sw_comp_cons, sw_comp_prod;
892 union eth_rx_cqe *cqe;
893 struct eth_fast_path_rx_cqe *cqe_fp;
895 #ifdef BNX2X_STOP_ON_ERROR
896 if (unlikely(bp->panic))
902 bd_cons = fp->rx_bd_cons;
903 bd_prod = fp->rx_bd_prod;
904 bd_prod_fw = bd_prod;
905 sw_comp_cons = fp->rx_comp_cons;
906 sw_comp_prod = fp->rx_comp_prod;
908 comp_ring_cons = RCQ_BD(sw_comp_cons);
909 cqe = &fp->rx_comp_ring[comp_ring_cons];
910 cqe_fp = &cqe->fast_path_cqe;
912 DP(NETIF_MSG_RX_STATUS,
913 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
915 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
916 struct sw_rx_bd *rx_buf = NULL;
919 enum eth_rx_cqe_type cqe_fp_type;
923 enum pkt_hash_types rxhash_type;
925 #ifdef BNX2X_STOP_ON_ERROR
926 if (unlikely(bp->panic))
930 bd_prod = RX_BD(bd_prod);
931 bd_cons = RX_BD(bd_cons);
933 /* A rmb() is required to ensure that the CQE is not read
934 * before it is written by the adapter DMA. PCI ordering
935 * rules will make sure the other fields are written before
936 * the marker at the end of struct eth_fast_path_rx_cqe
937 * but without rmb() a weakly ordered processor can process
938 * stale data. Without the barrier TPA state-machine might
939 * enter inconsistent state and kernel stack might be
940 * provided with incorrect packet description - these lead
941 * to various kernel crashes.
945 cqe_fp_flags = cqe_fp->type_error_flags;
946 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
948 DP(NETIF_MSG_RX_STATUS,
949 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
950 CQE_TYPE(cqe_fp_flags),
951 cqe_fp_flags, cqe_fp->status_flags,
952 le32_to_cpu(cqe_fp->rss_hash_result),
953 le16_to_cpu(cqe_fp->vlan_tag),
954 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
956 /* is this a slowpath msg? */
957 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
958 bnx2x_sp_event(fp, cqe);
962 rx_buf = &fp->rx_buf_ring[bd_cons];
965 if (!CQE_TYPE_FAST(cqe_fp_type)) {
966 struct bnx2x_agg_info *tpa_info;
967 u16 frag_size, pages;
968 #ifdef BNX2X_STOP_ON_ERROR
970 if (fp->mode == TPA_MODE_DISABLED &&
971 (CQE_TYPE_START(cqe_fp_type) ||
972 CQE_TYPE_STOP(cqe_fp_type)))
973 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
974 CQE_TYPE(cqe_fp_type));
977 if (CQE_TYPE_START(cqe_fp_type)) {
978 u16 queue = cqe_fp->queue_index;
979 DP(NETIF_MSG_RX_STATUS,
980 "calling tpa_start on queue %d\n",
983 bnx2x_tpa_start(fp, queue,
989 queue = cqe->end_agg_cqe.queue_index;
990 tpa_info = &fp->tpa_info[queue];
991 DP(NETIF_MSG_RX_STATUS,
992 "calling tpa_stop on queue %d\n",
995 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
998 if (fp->mode == TPA_MODE_GRO)
999 pages = (frag_size + tpa_info->full_page - 1) /
1000 tpa_info->full_page;
1002 pages = SGE_PAGE_ALIGN(frag_size) >>
1005 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
1006 &cqe->end_agg_cqe, comp_ring_cons);
1007 #ifdef BNX2X_STOP_ON_ERROR
1012 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1016 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1017 pad = cqe_fp->placement_offset;
1018 dma_sync_single_for_cpu(&bp->pdev->dev,
1019 dma_unmap_addr(rx_buf, mapping),
1020 pad + RX_COPY_THRESH,
1023 prefetch(data + pad); /* speedup eth_type_trans() */
1024 /* is this an error packet? */
1025 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1026 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1027 "ERROR flags %x rx packet %u\n",
1028 cqe_fp_flags, sw_comp_cons);
1029 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1033 /* Since we don't have a jumbo ring,
1034 * copy small packets if mtu > 1500
1036 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1037 (len <= RX_COPY_THRESH)) {
1038 skb = napi_alloc_skb(&fp->napi, len);
1040 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1041 "ERROR packet dropped because of alloc failure\n");
1042 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1045 memcpy(skb->data, data + pad, len);
1046 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1048 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1049 GFP_ATOMIC) == 0)) {
1050 dma_unmap_single(&bp->pdev->dev,
1051 dma_unmap_addr(rx_buf, mapping),
1054 skb = build_skb(data, fp->rx_frag_size);
1055 if (unlikely(!skb)) {
1056 bnx2x_frag_free(fp, data);
1057 bnx2x_fp_qstats(bp, fp)->
1058 rx_skb_alloc_failed++;
1061 skb_reserve(skb, pad);
1063 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1064 "ERROR packet dropped because of alloc failure\n");
1065 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1067 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1073 skb->protocol = eth_type_trans(skb, bp->dev);
1075 /* Set Toeplitz hash for a non-LRO skb */
1076 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1077 skb_set_hash(skb, rxhash, rxhash_type);
1079 skb_checksum_none_assert(skb);
1081 if (bp->dev->features & NETIF_F_RXCSUM)
1082 bnx2x_csum_validate(skb, cqe, fp,
1083 bnx2x_fp_qstats(bp, fp));
1085 skb_record_rx_queue(skb, fp->rx_queue);
1087 /* Check if this packet was timestamped */
1088 if (unlikely(cqe->fast_path_cqe.type_error_flags &
1089 (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1090 bnx2x_set_rx_ts(bp, skb);
1092 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1094 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1095 le16_to_cpu(cqe_fp->vlan_tag));
1097 skb_mark_napi_id(skb, &fp->napi);
1099 if (bnx2x_fp_ll_polling(fp))
1100 netif_receive_skb(skb);
1102 napi_gro_receive(&fp->napi, skb);
1104 rx_buf->data = NULL;
1106 bd_cons = NEXT_RX_IDX(bd_cons);
1107 bd_prod = NEXT_RX_IDX(bd_prod);
1108 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1111 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1112 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1114 /* mark CQE as free */
1115 BNX2X_SEED_CQE(cqe_fp);
1117 if (rx_pkt == budget)
1120 comp_ring_cons = RCQ_BD(sw_comp_cons);
1121 cqe = &fp->rx_comp_ring[comp_ring_cons];
1122 cqe_fp = &cqe->fast_path_cqe;
1125 fp->rx_bd_cons = bd_cons;
1126 fp->rx_bd_prod = bd_prod_fw;
1127 fp->rx_comp_cons = sw_comp_cons;
1128 fp->rx_comp_prod = sw_comp_prod;
1130 /* Update producers */
1131 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1134 fp->rx_pkt += rx_pkt;
1140 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1142 struct bnx2x_fastpath *fp = fp_cookie;
1143 struct bnx2x *bp = fp->bp;
1147 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1148 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1150 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1152 #ifdef BNX2X_STOP_ON_ERROR
1153 if (unlikely(bp->panic))
1157 /* Handle Rx and Tx according to MSI-X vector */
1158 for_each_cos_in_tx_queue(fp, cos)
1159 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1161 prefetch(&fp->sb_running_index[SM_RX_ID]);
1162 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1167 /* HW Lock for shared dual port PHYs */
1168 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1170 mutex_lock(&bp->port.phy_mutex);
1172 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1175 void bnx2x_release_phy_lock(struct bnx2x *bp)
1177 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1179 mutex_unlock(&bp->port.phy_mutex);
1182 /* calculates MF speed according to current line speed and MF configuration */
1183 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1185 u16 line_speed = bp->link_vars.line_speed;
1187 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1188 bp->mf_config[BP_VN(bp)]);
1190 /* Calculate the current MAX line speed limit for the MF devices */
1193 if (IS_MF_PERCENT_BW(bp))
1194 line_speed = (line_speed * maxCfg) / 100;
1195 else { /* SD mode */
1196 u16 vn_max_rate = maxCfg * 100;
1198 if (vn_max_rate < line_speed)
1199 line_speed = vn_max_rate;
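/* Worked example (illustrative, assumed values): with a physical line_speed
 * of 10000 Mbps and maxCfg = 30, percent-BW mode reports 10000 * 30 / 100 =
 * 3000 Mbps, while SD mode caps the reported speed at vn_max_rate =
 * 30 * 100 = 3000 Mbps (maxCfg is a percentage in the former case and a
 * multiple of 100 Mbps in the latter).
 */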
1207 * bnx2x_fill_report_data - fill link report data to report
1209 * @bp: driver handle
1210 * @data: link state to update
1212 * It uses non-atomic bit operations because it is called under the mutex.
1214 static void bnx2x_fill_report_data(struct bnx2x *bp,
1215 struct bnx2x_link_report_data *data)
1217 memset(data, 0, sizeof(*data));
1220 /* Fill the report data: effective line speed */
1221 data->line_speed = bnx2x_get_mf_speed(bp);
1224 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1225 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1226 &data->link_report_flags);
1228 if (!BNX2X_NUM_ETH_QUEUES(bp))
1229 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1230 &data->link_report_flags);
1233 if (bp->link_vars.duplex == DUPLEX_FULL)
1234 __set_bit(BNX2X_LINK_REPORT_FD,
1235 &data->link_report_flags);
1237 /* Rx Flow Control is ON */
1238 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1239 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1240 &data->link_report_flags);
1242 /* Tx Flow Control is ON */
1243 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1244 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1245 &data->link_report_flags);
1247 *data = bp->vf_link_vars;
1252 * bnx2x_link_report - report link status to OS.
1254 * @bp: driver handle
1256 * Calls the __bnx2x_link_report() under the same locking scheme
1257 * as the link/PHY state managing code to ensure consistent link reporting.
1261 void bnx2x_link_report(struct bnx2x *bp)
1263 bnx2x_acquire_phy_lock(bp);
1264 __bnx2x_link_report(bp);
1265 bnx2x_release_phy_lock(bp);
1269 * __bnx2x_link_report - report link status to OS.
1271 * @bp: driver handle
1273 * Non-atomic implementation.
1274 * Should be called under the phy_lock.
1276 void __bnx2x_link_report(struct bnx2x *bp)
1278 struct bnx2x_link_report_data cur_data;
1281 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1282 bnx2x_read_mf_cfg(bp);
1284 /* Read the current link report info */
1285 bnx2x_fill_report_data(bp, &cur_data);
1287 /* Don't report link down or exactly the same link status twice */
1288 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1289 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1290 &bp->last_reported_link.link_report_flags) &&
1291 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1292 &cur_data.link_report_flags)))
1297 /* We are going to report new link parameters now -
1298 * remember the current data for the next time.
1300 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1302 /* propagate status to VFs */
1304 bnx2x_iov_link_update(bp);
1306 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1307 &cur_data.link_report_flags)) {
1308 netif_carrier_off(bp->dev);
1309 netdev_err(bp->dev, "NIC Link is Down\n");
1315 netif_carrier_on(bp->dev);
1317 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1318 &cur_data.link_report_flags))
1323 /* Handle the FC at the end so that only these flags would be
1324 * possibly set. This way we may easily check if no FC is enabled.
1327 if (cur_data.link_report_flags) {
1328 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1329 &cur_data.link_report_flags)) {
1330 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1331 &cur_data.link_report_flags))
1332 flow = "ON - receive & transmit";
1334 flow = "ON - receive";
1336 flow = "ON - transmit";
1341 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1342 cur_data.line_speed, duplex, flow);
1346 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1350 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1351 struct eth_rx_sge *sge;
1353 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1355 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1356 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1359 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1360 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1364 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1365 struct bnx2x_fastpath *fp, int last)
1369 for (i = 0; i < last; i++) {
1370 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1371 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1372 u8 *data = first_buf->data;
1375 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1378 if (tpa_info->tpa_state == BNX2X_TPA_START)
1379 dma_unmap_single(&bp->pdev->dev,
1380 dma_unmap_addr(first_buf, mapping),
1381 fp->rx_buf_size, DMA_FROM_DEVICE);
1382 bnx2x_frag_free(fp, data);
1383 first_buf->data = NULL;
1387 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1391 for_each_rx_queue_cnic(bp, j) {
1392 struct bnx2x_fastpath *fp = &bp->fp[j];
1396 /* Activate BD ring */
1398 * this will generate an interrupt (to the TSTORM)
1399 * must only be done after chip is initialized
1401 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1406 void bnx2x_init_rx_rings(struct bnx2x *bp)
1408 int func = BP_FUNC(bp);
1412 /* Allocate TPA resources */
1413 for_each_eth_queue(bp, j) {
1414 struct bnx2x_fastpath *fp = &bp->fp[j];
1417 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1419 if (fp->mode != TPA_MODE_DISABLED) {
1420 /* Fill the per-aggregation pool */
1421 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1422 struct bnx2x_agg_info *tpa_info =
1424 struct sw_rx_bd *first_buf =
1425 &tpa_info->first_buf;
1428 bnx2x_frag_alloc(fp, GFP_KERNEL);
1429 if (!first_buf->data) {
1430 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1432 bnx2x_free_tpa_pool(bp, fp, i);
1433 fp->mode = TPA_MODE_DISABLED;
1436 dma_unmap_addr_set(first_buf, mapping, 0);
1437 tpa_info->tpa_state = BNX2X_TPA_STOP;
1440 /* "next page" elements initialization */
1441 bnx2x_set_next_page_sgl(fp);
1443 /* set SGEs bit mask */
1444 bnx2x_init_sge_ring_bit_mask(fp);
1446 /* Allocate SGEs and initialize the ring elements */
1447 for (i = 0, ring_prod = 0;
1448 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1450 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1452 BNX2X_ERR("was only able to allocate %d rx sges\n",
1454 BNX2X_ERR("disabling TPA for queue[%d]\n",
1456 /* Cleanup already allocated elements */
1457 bnx2x_free_rx_sge_range(bp, fp,
1459 bnx2x_free_tpa_pool(bp, fp,
1461 fp->mode = TPA_MODE_DISABLED;
1465 ring_prod = NEXT_SGE_IDX(ring_prod);
1468 fp->rx_sge_prod = ring_prod;
1472 for_each_eth_queue(bp, j) {
1473 struct bnx2x_fastpath *fp = &bp->fp[j];
1477 /* Activate BD ring */
1479 * this will generate an interrupt (to the TSTORM)
1480 * must only be done after chip is initialized
1482 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1488 if (CHIP_IS_E1(bp)) {
1489 REG_WR(bp, BAR_USTRORM_INTMEM +
1490 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1491 U64_LO(fp->rx_comp_mapping));
1492 REG_WR(bp, BAR_USTRORM_INTMEM +
1493 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1494 U64_HI(fp->rx_comp_mapping));
1499 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1502 struct bnx2x *bp = fp->bp;
1504 for_each_cos_in_tx_queue(fp, cos) {
1505 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1506 unsigned pkts_compl = 0, bytes_compl = 0;
1508 u16 sw_prod = txdata->tx_pkt_prod;
1509 u16 sw_cons = txdata->tx_pkt_cons;
1511 while (sw_cons != sw_prod) {
1512 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1513 &pkts_compl, &bytes_compl);
1517 netdev_tx_reset_queue(
1518 netdev_get_tx_queue(bp->dev,
1519 txdata->txq_index));
1523 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1527 for_each_tx_queue_cnic(bp, i) {
1528 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1532 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1536 for_each_eth_queue(bp, i) {
1537 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1541 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1543 struct bnx2x *bp = fp->bp;
1546 /* ring wasn't allocated */
1547 if (fp->rx_buf_ring == NULL)
1550 for (i = 0; i < NUM_RX_BD; i++) {
1551 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1552 u8 *data = rx_buf->data;
1556 dma_unmap_single(&bp->pdev->dev,
1557 dma_unmap_addr(rx_buf, mapping),
1558 fp->rx_buf_size, DMA_FROM_DEVICE);
1560 rx_buf->data = NULL;
1561 bnx2x_frag_free(fp, data);
1565 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1569 for_each_rx_queue_cnic(bp, j) {
1570 bnx2x_free_rx_bds(&bp->fp[j]);
1574 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1578 for_each_eth_queue(bp, j) {
1579 struct bnx2x_fastpath *fp = &bp->fp[j];
1581 bnx2x_free_rx_bds(fp);
1583 if (fp->mode != TPA_MODE_DISABLED)
1584 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1588 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1590 bnx2x_free_tx_skbs_cnic(bp);
1591 bnx2x_free_rx_skbs_cnic(bp);
1594 void bnx2x_free_skbs(struct bnx2x *bp)
1596 bnx2x_free_tx_skbs(bp);
1597 bnx2x_free_rx_skbs(bp);
1600 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1602 /* load old values */
1603 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1605 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1606 /* leave all but MAX value */
1607 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1609 /* set new MAX value */
1610 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1611 & FUNC_MF_CFG_MAX_BW_MASK;
1613 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1618 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1620 * @bp: driver handle
1621 * @nvecs: number of vectors to be released
1623 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1627 if (nvecs == offset)
1630 /* VFs don't have a default SB */
1632 free_irq(bp->msix_table[offset].vector, bp->dev);
1633 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1634 bp->msix_table[offset].vector);
1638 if (CNIC_SUPPORT(bp)) {
1639 if (nvecs == offset)
1644 for_each_eth_queue(bp, i) {
1645 if (nvecs == offset)
1647 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1648 i, bp->msix_table[offset].vector);
1650 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1654 void bnx2x_free_irq(struct bnx2x *bp)
1656 if (bp->flags & USING_MSIX_FLAG &&
1657 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1658 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1660 /* VFs don't have a default status block */
1664 bnx2x_free_msix_irqs(bp, nvecs);
1666 free_irq(bp->dev->irq, bp->dev);
1670 int bnx2x_enable_msix(struct bnx2x *bp)
1672 int msix_vec = 0, i, rc;
1674 /* VFs don't have a default status block */
1676 bp->msix_table[msix_vec].entry = msix_vec;
1677 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1678 bp->msix_table[0].entry);
1682 /* Cnic requires an msix vector for itself */
1683 if (CNIC_SUPPORT(bp)) {
1684 bp->msix_table[msix_vec].entry = msix_vec;
1685 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1686 msix_vec, bp->msix_table[msix_vec].entry);
1690 /* We need separate vectors for ETH queues only (not FCoE) */
1691 for_each_eth_queue(bp, i) {
1692 bp->msix_table[msix_vec].entry = msix_vec;
1693 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1694 msix_vec, msix_vec, i);
1698 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1701 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1702 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1704 * reconfigure number of tx/rx queues according to available MSI-X vectors
1707 if (rc == -ENOSPC) {
1708 /* Get by with single vector */
1709 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1711 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1716 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1717 bp->flags |= USING_SINGLE_MSIX_FLAG;
1719 BNX2X_DEV_INFO("set number of queues to 1\n");
1720 bp->num_ethernet_queues = 1;
1721 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1722 } else if (rc < 0) {
1723 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1725 } else if (rc < msix_vec) {
1726 /* how many fewer vectors will we have? */
1727 int diff = msix_vec - rc;
1729 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1732 * decrease number of queues by number of unallocated entries
1734 bp->num_ethernet_queues -= diff;
1735 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1737 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1741 bp->flags |= USING_MSIX_FLAG;
1746 /* fall back to INTx if not enough memory */
1748 bp->flags |= DISABLE_MSI_FLAG;
1753 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1755 int i, rc, offset = 0;
1757 /* no default status block for vf */
1759 rc = request_irq(bp->msix_table[offset++].vector,
1760 bnx2x_msix_sp_int, 0,
1761 bp->dev->name, bp->dev);
1763 BNX2X_ERR("request sp irq failed\n");
1768 if (CNIC_SUPPORT(bp))
1771 for_each_eth_queue(bp, i) {
1772 struct bnx2x_fastpath *fp = &bp->fp[i];
1773 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1776 rc = request_irq(bp->msix_table[offset].vector,
1777 bnx2x_msix_fp_int, 0, fp->name, fp);
1779 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1780 bp->msix_table[offset].vector, rc);
1781 bnx2x_free_msix_irqs(bp, offset);
1788 i = BNX2X_NUM_ETH_QUEUES(bp);
1790 offset = 1 + CNIC_SUPPORT(bp);
1791 netdev_info(bp->dev,
1792 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1793 bp->msix_table[0].vector,
1794 0, bp->msix_table[offset].vector,
1795 i - 1, bp->msix_table[offset + i - 1].vector);
1797 offset = CNIC_SUPPORT(bp);
1798 netdev_info(bp->dev,
1799 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1800 0, bp->msix_table[offset].vector,
1801 i - 1, bp->msix_table[offset + i - 1].vector);
1806 int bnx2x_enable_msi(struct bnx2x *bp)
1810 rc = pci_enable_msi(bp->pdev);
1812 BNX2X_DEV_INFO("MSI is not attainable\n");
1815 bp->flags |= USING_MSI_FLAG;
1820 static int bnx2x_req_irq(struct bnx2x *bp)
1822 unsigned long flags;
1825 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1828 flags = IRQF_SHARED;
1830 if (bp->flags & USING_MSIX_FLAG)
1831 irq = bp->msix_table[0].vector;
1833 irq = bp->pdev->irq;
1835 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1838 static int bnx2x_setup_irqs(struct bnx2x *bp)
1841 if (bp->flags & USING_MSIX_FLAG &&
1842 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1843 rc = bnx2x_req_msix_irqs(bp);
1847 rc = bnx2x_req_irq(bp);
1849 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1852 if (bp->flags & USING_MSI_FLAG) {
1853 bp->dev->irq = bp->pdev->irq;
1854 netdev_info(bp->dev, "using MSI IRQ %d\n",
1857 if (bp->flags & USING_MSIX_FLAG) {
1858 bp->dev->irq = bp->msix_table[0].vector;
1859 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1867 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1871 for_each_rx_queue_cnic(bp, i) {
1872 bnx2x_fp_busy_poll_init(&bp->fp[i]);
1873 napi_enable(&bnx2x_fp(bp, i, napi));
1877 static void bnx2x_napi_enable(struct bnx2x *bp)
1881 for_each_eth_queue(bp, i) {
1882 bnx2x_fp_busy_poll_init(&bp->fp[i]);
1883 napi_enable(&bnx2x_fp(bp, i, napi));
1887 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1891 for_each_rx_queue_cnic(bp, i) {
1892 napi_disable(&bnx2x_fp(bp, i, napi));
1893 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1894 usleep_range(1000, 2000);
1898 static void bnx2x_napi_disable(struct bnx2x *bp)
1902 for_each_eth_queue(bp, i) {
1903 napi_disable(&bnx2x_fp(bp, i, napi));
1904 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1905 usleep_range(1000, 2000);
1909 void bnx2x_netif_start(struct bnx2x *bp)
1911 if (netif_running(bp->dev)) {
1912 bnx2x_napi_enable(bp);
1913 if (CNIC_LOADED(bp))
1914 bnx2x_napi_enable_cnic(bp);
1915 bnx2x_int_enable(bp);
1916 if (bp->state == BNX2X_STATE_OPEN)
1917 netif_tx_wake_all_queues(bp->dev);
1921 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1923 bnx2x_int_disable_sync(bp, disable_hw);
1924 bnx2x_napi_disable(bp);
1925 if (CNIC_LOADED(bp))
1926 bnx2x_napi_disable_cnic(bp);
1929 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1930 void *accel_priv, select_queue_fallback_t fallback)
1932 struct bnx2x *bp = netdev_priv(dev);
1934 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1935 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1936 u16 ether_type = ntohs(hdr->h_proto);
1938 /* Skip VLAN tag if present */
1939 if (ether_type == ETH_P_8021Q) {
1940 struct vlan_ethhdr *vhdr =
1941 (struct vlan_ethhdr *)skb->data;
1943 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1946 /* If ethertype is FCoE or FIP - use FCoE ring */
1947 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1948 return bnx2x_fcoe_tx(bp, txq_index);
1951 /* select a non-FCoE queue */
1952 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1955 void bnx2x_set_num_queues(struct bnx2x *bp)
1958 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1960 /* override in STORAGE SD modes */
1961 if (IS_MF_STORAGE_ONLY(bp))
1962 bp->num_ethernet_queues = 1;
1964 /* Add special queues */
1965 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1966 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1968 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1972 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1974 * @bp: Driver handle
1976 * We currently support at most 16 Tx queues for each CoS, thus we will
1977 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1978 * max_cos parameter.
1980 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1981 * index after all ETH L2 indices.
1983 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1984 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1985 * 16..31,...) with indices that are not coupled with any real Tx queue.
1987 * The proper configuration of skb->queue_mapping is handled by
1988 * bnx2x_select_queue() and __skb_tx_hash().
1990 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1991 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
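 *
 * Illustrative example (assumed values): with 8 ETH L2 queues and max_cos = 3
 * this function requests tx = 8 * 3 = 24 ETH Tx queues and rx = 8 Rx queues,
 * plus one extra Tx queue and one extra Rx queue when an FCoE L2 queue is
 * included.
 */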
1993 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1997 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1998 rx = BNX2X_NUM_ETH_QUEUES(bp);
2000 /* account for fcoe queue */
2001 if (include_cnic && !NO_FCOE(bp)) {
2006 rc = netif_set_real_num_tx_queues(bp->dev, tx);
2008 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
2011 rc = netif_set_real_num_rx_queues(bp->dev, rx);
2013 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2017 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2023 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2027 for_each_queue(bp, i) {
2028 struct bnx2x_fastpath *fp = &bp->fp[i];
2031 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2034 * Although there are no IP frames expected to arrive on
2035 * this ring, we still want to add an
2036 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer overrun */
2039 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2042 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2043 IP_HEADER_ALIGNMENT_PADDING +
2046 BNX2X_FW_RX_ALIGN_END;
2047 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
2048 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2049 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2051 fp->rx_frag_size = 0;
2055 static int bnx2x_init_rss(struct bnx2x *bp)
2058 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2060 /* Prepare the initial contents of the indirection table if RSS is enabled */
2063 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2064 bp->rss_conf_obj.ind_table[i] =
2066 ethtool_rxfh_indir_default(i, num_eth_queues);
2069 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2070 * per-port, so if explicit configuration is needed, do it only
2071 * for a PMF.
2073 * For 57712 and newer, on the other hand, it's a per-function
2074 * configuration.
2076 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
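/* Illustrative example (assumed values): with num_eth_queues = 4,
 * ethtool_rxfh_indir_default(i, 4) fills the indirection table round-robin
 * (0, 1, 2, 3, 0, 1, ...), so RSS hash buckets are spread evenly across the
 * ETH Rx queues.
 */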
2079 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2080 bool config_hash, bool enable)
2082 struct bnx2x_config_rss_params params = {NULL};
2084 /* Although RSS is meaningless when there is a single HW queue we
2085 * still need it enabled in order to have HW Rx hash generated.
2087 * if (!is_eth_multi(bp))
2088 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2091 params.rss_obj = rss_obj;
2093 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
2096 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
2098 /* RSS configuration */
2099 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
2100 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
2101 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
2102 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
2103 if (rss_obj->udp_rss_v4)
2104 __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
2105 if (rss_obj->udp_rss_v6)
2106 __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
2108 if (!CHIP_IS_E1x(bp)) {
2109 /* valid only for TUNN_MODE_VXLAN tunnel mode */
2110 __set_bit(BNX2X_RSS_IPV4_VXLAN, ¶ms.rss_flags);
2111 __set_bit(BNX2X_RSS_IPV6_VXLAN, ¶ms.rss_flags);
2113 /* valid only for TUNN_MODE_GRE tunnel mode */
2114 __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, ¶ms.rss_flags);
2117 __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags);
2121 params.rss_result_mask = MULTI_MASK;
2123 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2127 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2128 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
2132 return bnx2x_config_rss(bp, ¶ms);
2134 return bnx2x_vfpf_config_rss(bp, ¶ms);
2137 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2139 struct bnx2x_func_state_params func_params = {NULL};
2141 /* Prepare parameters for function state transitions */
2142 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2144 func_params.f_obj = &bp->func_obj;
2145 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2147 func_params.params.hw_init.load_phase = load_code;
2149 return bnx2x_func_state_change(bp, &func_params);
2153 * Cleans the objects that have internal lists, without sending
2154 * ramrods. Should be run when interrupts are disabled.
2156 void bnx2x_squeeze_objects(struct bnx2x *bp)
2159 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2160 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2161 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2163 /***************** Cleanup MACs' object first *************************/
2165 /* Wait for completion of the requested commands */
2166 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2167 /* Perform a dry cleanup */
2168 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2170 /* Clean ETH primary MAC */
2171 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2172 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2175 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2177 /* Cleanup UC list */
2179 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2180 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2183 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2185 /***************** Now clean mcast object *****************************/
2186 rparam.mcast_obj = &bp->mcast_obj;
2187 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2189 /* Add a DEL command... - Since we're doing a driver cleanup only,
2190 * we take a lock surrounding both the initial send and the CONTs,
2191 * as we don't want a true completion to disrupt us in the middle.
2193 netif_addr_lock_bh(bp->dev);
2194 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2196 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2199 /* ...and wait until all pending commands are cleared */
2200 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2203 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2205 netif_addr_unlock_bh(bp->dev);
2209 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2211 netif_addr_unlock_bh(bp->dev);
2214 #ifndef BNX2X_STOP_ON_ERROR
2215 #define LOAD_ERROR_EXIT(bp, label) \
2217 (bp)->state = BNX2X_STATE_ERROR; \
2221 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2223 bp->cnic_loaded = false; \
2226 #else /*BNX2X_STOP_ON_ERROR*/
2227 #define LOAD_ERROR_EXIT(bp, label) \
2229 (bp)->state = BNX2X_STATE_ERROR; \
2233 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2235 bp->cnic_loaded = false; \
2239 #endif /*BNX2X_STOP_ON_ERROR*/
2241 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2243 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2244 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2248 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2250 int num_groups, vf_headroom = 0;
2251 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2253 /* number of queues for statistics is number of eth queues + FCoE */
2254 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2256 /* Total number of FW statistics requests =
2257 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2258 * and fcoe l2 queue) stats + num of queues (which includes another 1
2259 * for fcoe l2 queue if applicable)
2261 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2263 /* vf stats appear in the request list, but their data is allocated by
2264 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2265 * it is used to determine where to place the vf stats queries in the request
2269 vf_headroom = bnx2x_vf_headroom(bp);
2271 /* Request is built from stats_query_header and an array of
2272 * stats_query_cmd_group each of which contains
2273 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2274 * configured in the stats_query_header.
2277 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2278 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2281 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2282 bp->fw_stats_num, vf_headroom, num_groups);
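/* Illustrative arithmetic (assumed values): with fw_stats_num = 20,
 * vf_headroom = 12 and STATS_QUERY_CMD_COUNT = 16 the expression above gives
 * num_groups = 32 / 16 + (32 % 16 ? 1 : 0) = 2, i.e. a ceiling division of
 * the total number of requests by the per-group rule count.
 */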
2283 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2284 num_groups * sizeof(struct stats_query_cmd_group);
2286 /* Data for statistics requests + stats_counter
2287 * stats_counter holds per-STORM counters that are incremented
2288 * when STORM has finished with the current request.
2289 * memory for FCoE offloaded statistics is counted anyway,
2290 * even if they will not be sent.
2291 * VF stats are not accounted for here as the data of VF stats is stored
2292 * in memory allocated by the VF, not here.
2294 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2295 sizeof(struct per_pf_stats) +
2296 sizeof(struct fcoe_statistics_params) +
2297 sizeof(struct per_queue_stats) * num_queue_stats +
2298 sizeof(struct stats_counter);
2300 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2301 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
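/* The request and the data share one DMA-coherent buffer:
 *   [ fw_stats_req (fw_stats_req_sz bytes) | fw_stats_data (fw_stats_data_sz bytes) ]
 * The shortcut pointers and mappings set below are simply offsets into it.
 */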
2306 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2307 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2308 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2309 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2310 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2311 bp->fw_stats_req_sz;
2313 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2314 U64_HI(bp->fw_stats_req_mapping),
2315 U64_LO(bp->fw_stats_req_mapping));
2316 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2317 U64_HI(bp->fw_stats_data_mapping),
2318 U64_LO(bp->fw_stats_data_mapping));
2322 bnx2x_free_fw_stats_mem(bp);
2323 BNX2X_ERR("Can't allocate FW stats memory\n");
2327 /* send load request to mcp and analyze response */
2328 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2334 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2335 DRV_MSG_SEQ_NUMBER_MASK);
2336 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2338 /* Get current FW pulse sequence */
2339 bp->fw_drv_pulse_wr_seq =
2340 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2341 DRV_PULSE_SEQ_MASK);
2342 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2344 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2346 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2347 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2350 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2352 /* if mcp fails to respond we must abort */
2353 if (!(*load_code)) {
2354 BNX2X_ERR("MCP response failure, aborting\n");
2358 /* If mcp refused (e.g. other port is in diagnostic mode) we
2359 * must abort
2360 */
2361 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2362 BNX2X_ERR("MCP refused load request, aborting\n");
2368 /* check whether another PF has already loaded FW to chip. In
2369 * virtualized environments a pf from another VM may have already
2370 * initialized the device including loading FW
2372 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2374 /* is another pf loaded on this engine? */
2375 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2376 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2377 /* build my FW version dword */
2380 /* read loaded FW from chip */
2381 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2383 u32 my_fw = ~loaded_fw;
2385 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2388 /* abort nic load if version mismatch */
2389 if (my_fw != loaded_fw) {
2391 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2394 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2402 /* returns the "mcp load_code" according to global load_count array */
2403 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2405 int path = BP_PATH(bp);
2407 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2408 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2409 bnx2x_load_count[path][2]);
2410 bnx2x_load_count[path][0]++;
2411 bnx2x_load_count[path][1 + port]++;
2412 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2413 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2414 bnx2x_load_count[path][2]);
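/* bnx2x_load_count[path][0] counts all functions loaded on this path and
 * [1 + port] counts the functions loaded on each port of the path. The
 * first function on the path answers COMMON, the first on a port answers
 * PORT, everyone else answers FUNCTION - mirroring what the MCP would
 * have returned.
 */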
2415 if (bnx2x_load_count[path][0] == 1)
2416 return FW_MSG_CODE_DRV_LOAD_COMMON;
2417 else if (bnx2x_load_count[path][1 + port] == 1)
2418 return FW_MSG_CODE_DRV_LOAD_PORT;
2420 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2423 /* mark PMF if applicable */
2424 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2426 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2427 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2428 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
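/* A COMMON/COMMON_CHIP/PORT response means we are the first driver on
 * this port, so this function becomes the PMF (port management function),
 * responsible, among other things, for link and port statistics handling.
 */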
2430 /* We need the barrier to ensure the ordering between the
2431 * writing to bp->port.pmf here and reading it from the
2432 * bnx2x_periodic_task().
2439 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2442 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2444 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2445 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2446 (bp->common.shmem2_base)) {
2447 if (SHMEM2_HAS(bp, dcc_support))
2448 SHMEM2_WR(bp, dcc_support,
2449 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2450 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2451 if (SHMEM2_HAS(bp, afex_driver_support))
2452 SHMEM2_WR(bp, afex_driver_support,
2453 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2456 /* Set AFEX default VLAN tag to an invalid value */
2457 bp->afex_def_vlan_tag = -1;
2461 * bnx2x_bz_fp - zero content of the fastpath structure.
2463 * @bp: driver handle
2464 * @index: fastpath index to be zeroed
2466 * Makes sure the contents of the bp->fp[index].napi are kept
2467 * intact.
2468 */
2469 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2471 struct bnx2x_fastpath *fp = &bp->fp[index];
2473 struct napi_struct orig_napi = fp->napi;
2474 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2476 /* bzero bnx2x_fastpath contents */
2478 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2479 sizeof(struct bnx2x_agg_info));
2480 memset(fp, 0, sizeof(*fp));
2482 /* Restore the NAPI object as it has been already initialized */
2483 fp->napi = orig_napi;
2484 fp->tpa_info = orig_tpa_info;
2488 fp->max_cos = bp->max_cos;
2490 /* Special queues support only one CoS */
2493 /* Init txdata pointers */
2495 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2497 for_each_cos_in_tx_queue(fp, cos)
2498 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2499 BNX2X_NUM_ETH_QUEUES(bp) + index];
2501 /* set the tpa flag for each queue. The tpa flag determines the queue
2502 * minimal size so it must be set prior to queue memory allocation
2504 if (bp->dev->features & NETIF_F_LRO)
2505 fp->mode = TPA_MODE_LRO;
2506 else if (bp->dev->features & NETIF_F_GRO &&
2507 bnx2x_mtu_allows_gro(bp->dev->mtu))
2508 fp->mode = TPA_MODE_GRO;
2510 fp->mode = TPA_MODE_DISABLED;
2512 /* We don't want TPA if it's disabled in bp
2513 * or if this is an FCoE L2 ring.
2515 if (bp->disable_tpa || IS_FCOE_FP(fp))
2516 fp->mode = TPA_MODE_DISABLED;
2519 void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2523 if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2526 cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2527 DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2530 SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2533 int bnx2x_load_cnic(struct bnx2x *bp)
2535 int i, rc, port = BP_PORT(bp);
2537 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2539 mutex_init(&bp->cnic_mutex);
2542 rc = bnx2x_alloc_mem_cnic(bp);
2544 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2545 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2549 rc = bnx2x_alloc_fp_mem_cnic(bp);
2551 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2552 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2555 /* Update the number of queues with the cnic queues */
2556 rc = bnx2x_set_real_num_queues(bp, 1);
2558 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2559 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2562 /* Add all CNIC NAPI objects */
2563 bnx2x_add_all_napi_cnic(bp);
2564 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2565 bnx2x_napi_enable_cnic(bp);
2567 rc = bnx2x_init_hw_func_cnic(bp);
2569 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2571 bnx2x_nic_init_cnic(bp);
2574 /* Enable Timer scan */
2575 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2577 /* setup cnic queues */
2578 for_each_cnic_queue(bp, i) {
2579 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2581 BNX2X_ERR("Queue setup failed\n");
2582 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2587 /* Initialize Rx filter. */
2588 bnx2x_set_rx_mode_inner(bp);
2590 /* re-read iscsi info */
2591 bnx2x_get_iscsi_info(bp);
2592 bnx2x_setup_cnic_irq_info(bp);
2593 bnx2x_setup_cnic_info(bp);
2594 bp->cnic_loaded = true;
2595 if (bp->state == BNX2X_STATE_OPEN)
2596 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2598 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2602 #ifndef BNX2X_STOP_ON_ERROR
2604 /* Disable Timer scan */
2605 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2608 bnx2x_napi_disable_cnic(bp);
2609 /* Update the number of queues without the cnic queues */
2610 if (bnx2x_set_real_num_queues(bp, 0))
2611 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2613 BNX2X_ERR("CNIC-related load failed\n");
2614 bnx2x_free_fp_mem_cnic(bp);
2615 bnx2x_free_mem_cnic(bp);
2617 #endif /* ! BNX2X_STOP_ON_ERROR */
2620 /* must be called with rtnl_lock */
2621 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2623 int port = BP_PORT(bp);
2624 int i, rc = 0, load_code = 0;
2626 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2627 DP(NETIF_MSG_IFUP,
2628 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2630 #ifdef BNX2X_STOP_ON_ERROR
2631 if (unlikely(bp->panic)) {
2632 BNX2X_ERR("Can't load NIC when there is panic\n");
2637 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2639 /* zero the structure w/o any lock, before SP handler is initialized */
2640 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2641 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2642 &bp->last_reported_link.link_report_flags);
2645 /* must be called before memory allocation and HW init */
2646 bnx2x_ilt_set_info(bp);
2648 /* Zero the fastpath structures while preserving invariants: the napi
2649 * struct (which is allocated only once), the fp index, max_cos and the
2650 * bp pointer.
2651 * Also set fp->mode and txdata_ptr.
2652 */
2653 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2654 for_each_queue(bp, i)
2656 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2657 bp->num_cnic_queues) *
2658 sizeof(struct bnx2x_fp_txdata));
2660 bp->fcoe_init = false;
2662 /* Set the receive queues buffer size */
2663 bnx2x_set_rx_buf_size(bp);
2666 rc = bnx2x_alloc_mem(bp);
2668 BNX2X_ERR("Unable to allocate bp memory\n");
2673 /* needs to be done after alloc mem, since it's self adjusting to the
2674 * amount of memory available for RSS queues
2675 */
2676 rc = bnx2x_alloc_fp_mem(bp);
2678 BNX2X_ERR("Unable to allocate memory for fps\n");
2679 LOAD_ERROR_EXIT(bp, load_error0);
2682 /* Allocate memory for FW statistics */
2683 if (bnx2x_alloc_fw_stats_mem(bp))
2684 LOAD_ERROR_EXIT(bp, load_error0);
2686 /* request pf to initialize status blocks */
2688 rc = bnx2x_vfpf_init(bp);
2690 LOAD_ERROR_EXIT(bp, load_error0);
2693 /* Since bnx2x_alloc_mem() may possibly update
2694 * bp->num_queues, bnx2x_set_real_num_queues() must always
2695 * come after it. At this stage cnic queues are not counted.
2696 */
2697 rc = bnx2x_set_real_num_queues(bp, 0);
2699 BNX2X_ERR("Unable to set real_num_queues\n");
2700 LOAD_ERROR_EXIT(bp, load_error0);
2703 /* Configure multi-CoS mappings in the kernel.
2704 * This configuration may be overridden by a multi-class queueing
2705 * discipline or by a DCBX negotiation result.
2706 */
2707 bnx2x_setup_tc(bp->dev, bp->max_cos);
2709 /* Add all NAPI objects */
2710 bnx2x_add_all_napi(bp);
2711 DP(NETIF_MSG_IFUP, "napi added\n");
2712 bnx2x_napi_enable(bp);
2715 /* set pf load just before approaching the MCP */
2716 bnx2x_set_pf_load(bp);
2718 /* if mcp exists send load request and analyze response */
2719 if (!BP_NOMCP(bp)) {
2720 /* attempt to load pf */
2721 rc = bnx2x_nic_load_request(bp, &load_code);
2723 LOAD_ERROR_EXIT(bp, load_error1);
2725 /* what did mcp say? */
2726 rc = bnx2x_compare_fw_ver(bp, load_code, true);
2728 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2729 LOAD_ERROR_EXIT(bp, load_error2);
2732 load_code = bnx2x_nic_load_no_mcp(bp, port);
2735 /* mark pmf if applicable */
2736 bnx2x_nic_load_pmf(bp, load_code);
2738 /* Init Function state controlling object */
2739 bnx2x__init_func_obj(bp);
2742 rc = bnx2x_init_hw(bp, load_code);
2744 BNX2X_ERR("HW init failed, aborting\n");
2745 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2746 LOAD_ERROR_EXIT(bp, load_error2);
2750 bnx2x_pre_irq_nic_init(bp);
2752 /* Connect to IRQs */
2753 rc = bnx2x_setup_irqs(bp);
2755 BNX2X_ERR("setup irqs failed\n");
2757 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2758 LOAD_ERROR_EXIT(bp, load_error2);
2761 /* Init per-function objects */
2763 /* Setup NIC internals and enable interrupts */
2764 bnx2x_post_irq_nic_init(bp, load_code);
2766 bnx2x_init_bp_objs(bp);
2767 bnx2x_iov_nic_init(bp);
2769 /* Set AFEX default VLAN tag to an invalid value */
2770 bp->afex_def_vlan_tag = -1;
2771 bnx2x_nic_load_afex_dcc(bp, load_code);
2772 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2773 rc = bnx2x_func_start(bp);
2775 BNX2X_ERR("Function start failed!\n");
2776 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2778 LOAD_ERROR_EXIT(bp, load_error3);
2781 /* Send LOAD_DONE command to MCP */
2782 if (!BP_NOMCP(bp)) {
2783 load_code = bnx2x_fw_command(bp,
2784 DRV_MSG_CODE_LOAD_DONE, 0);
2786 BNX2X_ERR("MCP response failure, aborting\n");
2788 LOAD_ERROR_EXIT(bp, load_error3);
2792 /* initialize FW coalescing state machines in RAM */
2793 bnx2x_update_coalesce(bp);
2796 /* setup the leading queue */
2797 rc = bnx2x_setup_leading(bp);
2799 BNX2X_ERR("Setup leading failed!\n");
2800 LOAD_ERROR_EXIT(bp, load_error3);
2803 /* set up the rest of the queues */
2804 for_each_nondefault_eth_queue(bp, i) {
2806 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2808 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2810 BNX2X_ERR("Queue %d setup failed\n", i);
2811 LOAD_ERROR_EXIT(bp, load_error3);
2816 rc = bnx2x_init_rss(bp);
2818 BNX2X_ERR("PF RSS init failed\n");
2819 LOAD_ERROR_EXIT(bp, load_error3);
2822 /* Now that clients are configured we are ready to work */
2823 bp->state = BNX2X_STATE_OPEN;
2825 /* Configure a ucast MAC */
2827 rc = bnx2x_set_eth_mac(bp, true);
2829 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2832 BNX2X_ERR("Setting Ethernet MAC failed\n");
2833 LOAD_ERROR_EXIT(bp, load_error3);
2836 if (IS_PF(bp) && bp->pending_max) {
2837 bnx2x_update_max_mf_config(bp, bp->pending_max);
2838 bp->pending_max = 0;
2842 rc = bnx2x_initial_phy_init(bp, load_mode);
2844 LOAD_ERROR_EXIT(bp, load_error3);
2846 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2848 /* Start fast path */
2850 /* Re-configure vlan filters */
2851 rc = bnx2x_vlan_reconfigure_vid(bp);
2853 LOAD_ERROR_EXIT(bp, load_error3);
2855 /* Initialize Rx filter. */
2856 bnx2x_set_rx_mode_inner(bp);
2858 if (bp->flags & PTP_SUPPORTED) {
2860 bnx2x_configure_ptp_filters(bp);
2863 switch (load_mode) {
2865 /* Tx queue should be only re-enabled */
2866 netif_tx_wake_all_queues(bp->dev);
2870 netif_tx_start_all_queues(bp->dev);
2871 smp_mb__after_atomic();
2875 case LOAD_LOOPBACK_EXT:
2876 bp->state = BNX2X_STATE_DIAG;
2884 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2886 bnx2x__link_status_update(bp);
2888 /* start the timer */
2889 mod_timer(&bp->timer, jiffies + bp->current_interval);
2891 if (CNIC_ENABLED(bp))
2892 bnx2x_load_cnic(bp);
2895 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2897 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2898 /* mark driver is loaded in shmem2 */
2900 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2901 val &= ~DRV_FLAGS_MTU_MASK;
2902 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2903 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2904 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2905 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2908 /* Wait for all pending SP commands to complete */
2909 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2910 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2911 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2915 /* Update driver data for On-Chip MFW dump. */
2917 bnx2x_update_mfw_dump(bp);
2919 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2920 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2921 bnx2x_dcbx_init(bp, false);
2923 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2924 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2926 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2930 #ifndef BNX2X_STOP_ON_ERROR
2933 bnx2x_int_disable_sync(bp, 1);
2935 /* Clean queueable objects */
2936 bnx2x_squeeze_objects(bp);
2939 /* Free SKBs, SGEs, TPA pool and driver internals */
2940 bnx2x_free_skbs(bp);
2941 for_each_rx_queue(bp, i)
2942 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2947 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2948 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2949 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2954 bnx2x_napi_disable(bp);
2955 bnx2x_del_all_napi(bp);
2957 /* clear pf_load status, as it was already set */
2959 bnx2x_clear_pf_load(bp);
2961 bnx2x_free_fw_stats_mem(bp);
2962 bnx2x_free_fp_mem(bp);
2966 #endif /* ! BNX2X_STOP_ON_ERROR */
2969 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2973 /* Wait until tx fastpath tasks complete */
2974 for_each_tx_queue(bp, i) {
2975 struct bnx2x_fastpath *fp = &bp->fp[i];
2977 for_each_cos_in_tx_queue(fp, cos)
2978 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2985 /* must be called with rtnl_lock */
2986 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2989 bool global = false;
2991 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2993 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2994 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2996 /* mark driver is unloaded in shmem2 */
2997 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2999 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
3000 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
3001 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
3004 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
3005 (bp->state == BNX2X_STATE_CLOSED ||
3006 bp->state == BNX2X_STATE_ERROR)) {
3007 /* We can get here if the driver has been unloaded
3008 * during parity error recovery and is either waiting for a
3009 * leader to complete or for other functions to unload and
3010 * then ifdown has been issued. In this case we want to
3011 * unload and let other functions to complete a recovery
3014 bp->recovery_state = BNX2X_RECOVERY_DONE;
3016 bnx2x_release_leader_lock(bp);
3019 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3020 BNX2X_ERR("Can't unload in closed or error state\n");
3024 /* Nothing to do during unload if the previous bnx2x_nic_load()
3025 * has not completed successfully - all resources are released.
3026 *
3027 * We can get here only after an unsuccessful ndo_* callback, during which
3028 * dev->IFF_UP flag is still on.
3029 */
3030 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3033 /* It's important to set the bp->state to the value different from
3034 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3035 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3037 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3040 /* indicate to VFs that the PF is going down */
3041 bnx2x_iov_channel_down(bp);
3043 if (CNIC_LOADED(bp))
3044 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3047 bnx2x_tx_disable(bp);
3048 netdev_reset_tc(bp->dev);
3050 bp->rx_mode = BNX2X_RX_MODE_NONE;
3052 del_timer_sync(&bp->timer);
3055 /* Set ALWAYS_ALIVE bit in shmem */
3056 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3057 bnx2x_drv_pulse(bp);
3058 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3059 bnx2x_save_statistics(bp);
3062 /* wait till consumers catch up with producers in all queues */
3063 bnx2x_drain_tx_queues(bp);
3065 /* if VF, indicate to the PF that this function is going down (the PF
3066 * will delete the sp elements and clear the initializations)
3067 */
3068 if (IS_VF(bp))
3069 bnx2x_vfpf_close_vf(bp);
3070 else if (unload_mode != UNLOAD_RECOVERY)
3071 /* if this is a normal/close unload need to clean up chip*/
3072 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3074 /* Send the UNLOAD_REQUEST to the MCP */
3075 bnx2x_send_unload_req(bp, unload_mode);
3077 /* Prevent transactions to the host from the functions on the
3078 * engine that doesn't reset global blocks in case of a global
3079 * attention, once global blocks are reset and gates are opened
3080 * (i.e. the engine whose leader will perform the recovery last).
3081 */
3083 if (!CHIP_IS_E1x(bp))
3084 bnx2x_pf_disable(bp);
3086 /* Disable HW interrupts, NAPI */
3087 bnx2x_netif_stop(bp, 1);
3088 /* Delete all NAPI objects */
3089 bnx2x_del_all_napi(bp);
3090 if (CNIC_LOADED(bp))
3091 bnx2x_del_all_napi_cnic(bp);
3095 /* Report UNLOAD_DONE to MCP */
3096 bnx2x_send_unload_done(bp, false);
3100 * At this stage no more interrupts will arrive so we may safely clean
3101 * the queueable objects here in case they failed to get cleaned so far.
3104 bnx2x_squeeze_objects(bp);
3106 /* There should be no more pending SP commands at this stage */
3111 /* clear pending work in rtnl task */
3112 bp->sp_rtnl_state = 0;
3115 /* Free SKBs, SGEs, TPA pool and driver internals */
3116 bnx2x_free_skbs(bp);
3117 if (CNIC_LOADED(bp))
3118 bnx2x_free_skbs_cnic(bp);
3119 for_each_rx_queue(bp, i)
3120 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3122 bnx2x_free_fp_mem(bp);
3123 if (CNIC_LOADED(bp))
3124 bnx2x_free_fp_mem_cnic(bp);
3127 if (CNIC_LOADED(bp))
3128 bnx2x_free_mem_cnic(bp);
3132 bp->state = BNX2X_STATE_CLOSED;
3133 bp->cnic_loaded = false;
3135 /* Clear driver version indication in shmem */
3137 bnx2x_update_mng_version(bp);
3139 /* Check if there are pending parity attentions. If there are - set
3140 * RECOVERY_IN_PROGRESS.
3142 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3143 bnx2x_set_reset_in_progress(bp);
3145 /* Set RESET_IS_GLOBAL if needed */
3147 bnx2x_set_reset_global(bp);
3150 /* The last driver must disable a "close the gate" if there is no
3151 * parity attention or "process kill" pending.
3154 !bnx2x_clear_pf_load(bp) &&
3155 bnx2x_reset_is_done(bp, BP_PATH(bp)))
3156 bnx2x_disable_close_the_gate(bp);
3158 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3163 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3167 /* If there is no power capability, silently succeed */
3168 if (!bp->pdev->pm_cap) {
3169 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3173 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3177 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3178 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3179 PCI_PM_CTRL_PME_STATUS));
3181 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3182 /* delay required during transition out of D3hot */
3187 /* If there are other clients above, don't
3188 * shut down the power */
3189 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3191 /* Don't shut down the power for emulation and FPGA */
3192 if (CHIP_REV_IS_SLOW(bp))
3195 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3199 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3201 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3204 /* No more memory access after this point until
3205 * device is brought back to D0.
3210 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3217 * net_device service functions
3219 static int bnx2x_poll(struct napi_struct *napi, int budget)
3223 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3225 struct bnx2x *bp = fp->bp;
3228 #ifdef BNX2X_STOP_ON_ERROR
3229 if (unlikely(bp->panic)) {
3230 napi_complete(napi);
3234 if (!bnx2x_fp_lock_napi(fp))
3237 for_each_cos_in_tx_queue(fp, cos)
3238 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3239 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3241 if (bnx2x_has_rx_work(fp)) {
3242 work_done += bnx2x_rx_int(fp, budget - work_done);
3244 /* must not complete if we consumed full budget */
3245 if (work_done >= budget) {
3246 bnx2x_fp_unlock_napi(fp);
3251 bnx2x_fp_unlock_napi(fp);
3253 /* Fall out from the NAPI loop if needed */
3254 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3256 /* No need to update SB for FCoE L2 ring as long as
3257 * it's connected to the default SB and the SB
3258 * has been updated when NAPI was scheduled.
3260 if (IS_FCOE_FP(fp)) {
3261 napi_complete(napi);
3264 bnx2x_update_fpsb_idx(fp);
3265 /* bnx2x_has_rx_work() reads the status block,
3266 * thus we need to ensure that status block indices
3267 * have been actually read (bnx2x_update_fpsb_idx)
3268 * prior to this check (bnx2x_has_rx_work) so that
3269 * we won't write the "newer" value of the status block
3270 * to IGU (if there was a DMA right after
3271 * bnx2x_has_rx_work and if there is no rmb, the memory
3272 * reading (bnx2x_update_fpsb_idx) may be postponed
3273 * to right before bnx2x_ack_sb). In this case there
3274 * will never be another interrupt until there is
3275 * another update of the status block, while there
3276 * is still unhandled work.
3280 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3281 napi_complete(napi);
3282 /* Re-enable interrupts */
3283 DP(NETIF_MSG_RX_STATUS,
3284 "Update index to %d\n", fp->fp_hc_idx);
3285 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3286 le16_to_cpu(fp->fp_hc_idx),
3296 #ifdef CONFIG_NET_RX_BUSY_POLL
3297 /* must be called with local_bh_disable()d */
3298 int bnx2x_low_latency_recv(struct napi_struct *napi)
3300 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3302 struct bnx2x *bp = fp->bp;
3305 if ((bp->state == BNX2X_STATE_CLOSED) ||
3306 (bp->state == BNX2X_STATE_ERROR) ||
3307 (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO)))
3308 return LL_FLUSH_FAILED;
3310 if (!bnx2x_fp_lock_poll(fp))
3311 return LL_FLUSH_BUSY;
3313 if (bnx2x_has_rx_work(fp))
3314 found = bnx2x_rx_int(fp, 4);
3316 bnx2x_fp_unlock_poll(fp);
3322 /* we split the first BD into header and data BDs
3323 * to ease the pain of our fellow microcode engineers;
3324 * we use one mapping for both BDs.
3325 */
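/* Rough picture of the split (sizes are illustrative):
 *   before: [ start BD: hlen header bytes + payload ]
 *   after:  [ start BD: hlen bytes ][ data BD: old_len - hlen bytes ]
 * Both BDs point into the same DMA mapping; the data BD simply starts
 * at (mapping + hlen).
 */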
3326 static u16 bnx2x_tx_split(struct bnx2x *bp,
3327 struct bnx2x_fp_txdata *txdata,
3328 struct sw_tx_bd *tx_buf,
3329 struct eth_tx_start_bd **tx_bd, u16 hlen,
3332 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3333 struct eth_tx_bd *d_tx_bd;
3335 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3337 /* first fix first BD */
3338 h_tx_bd->nbytes = cpu_to_le16(hlen);
3340 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3341 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3343 /* now get a new data BD
3344 * (after the pbd) and fill it */
3345 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3346 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3348 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3349 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3351 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3352 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3353 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3355 /* this marks the BD as one that has no individual mapping */
3356 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3358 DP(NETIF_MSG_TX_QUEUED,
3359 "TSO split data size is %d (%x:%x)\n",
3360 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3363 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3368 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3369 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
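/* bnx2x_csum_fix() below compensates for the HW starting its checksum at an
 * offset ('fix' = SKB_CS_OFF(skb) in the caller) from the transport header:
 * the partial sum of the bytes between the two start points is subtracted
 * (fix > 0) or added (fix < 0) and the result folded back. This is our
 * reading of the UDP CSUM errata workaround mentioned in the file header.
 */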
3370 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3372 __sum16 tsum = (__force __sum16) csum;
3375 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3376 csum_partial(t_header - fix, fix, 0)));
3379 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3380 csum_partial(t_header, -fix, 0)));
3382 return bswab16(tsum);
3385 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3391 if (skb->ip_summed != CHECKSUM_PARTIAL)
3394 protocol = vlan_get_protocol(skb);
3395 if (protocol == htons(ETH_P_IPV6)) {
3397 prot = ipv6_hdr(skb)->nexthdr;
3400 prot = ip_hdr(skb)->protocol;
3403 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3404 if (inner_ip_hdr(skb)->version == 6) {
3405 rc |= XMIT_CSUM_ENC_V6;
3406 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3407 rc |= XMIT_CSUM_TCP;
3409 rc |= XMIT_CSUM_ENC_V4;
3410 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3411 rc |= XMIT_CSUM_TCP;
3414 if (prot == IPPROTO_TCP)
3415 rc |= XMIT_CSUM_TCP;
3417 if (skb_is_gso(skb)) {
3418 if (skb_is_gso_v6(skb)) {
3419 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3420 if (rc & XMIT_CSUM_ENC)
3421 rc |= XMIT_GSO_ENC_V6;
3423 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3424 if (rc & XMIT_CSUM_ENC)
3425 rc |= XMIT_GSO_ENC_V4;
3432 /* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
3433 #define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4
3435 /* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3436 #define BNX2X_NUM_TSO_WIN_SUB_BDS 3
3438 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3439 /* Check if a packet requires linearization (i.e. it is too fragmented).
3440 * No need to check fragmentation if page size > 8K (there will be no
3441 * violation of FW restrictions). */
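/* The FW restriction being checked: within any window of
 * (MAX_FETCH_BD - num_tso_win_sub) consecutive BDs of a GSO packet there
 * must be at least gso_size (lso_mss) bytes of data; otherwise the skb is
 * copied/linearized so that fewer, larger BDs are produced.
 */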
3442 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3445 int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3446 int to_copy = 0, hlen = 0;
3448 if (xmit_type & XMIT_GSO_ENC)
3449 num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3451 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3452 if (xmit_type & XMIT_GSO) {
3453 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3454 int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3455 /* Number of windows to check */
3456 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3461 /* Headers length */
3462 if (xmit_type & XMIT_GSO_ENC)
3463 hlen = (int)(skb_inner_transport_header(skb) -
3465 inner_tcp_hdrlen(skb);
3467 hlen = (int)(skb_transport_header(skb) -
3468 skb->data) + tcp_hdrlen(skb);
3470 /* Amount of data (w/o headers) on linear part of SKB*/
3471 first_bd_sz = skb_headlen(skb) - hlen;
3473 wnd_sum = first_bd_sz;
3475 /* Calculate the first sum - it's special */
3476 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3478 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3480 /* If there was data on linear skb data - check it */
3481 if (first_bd_sz > 0) {
3482 if (unlikely(wnd_sum < lso_mss)) {
3487 wnd_sum -= first_bd_sz;
3490 /* Others are easier: run through the frag list and
3491 check all windows */
3492 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3494 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3496 if (unlikely(wnd_sum < lso_mss)) {
3501 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3504 /* in the non-LSO case a too fragmented packet should always be linearized */
3511 if (unlikely(to_copy))
3512 DP(NETIF_MSG_TX_QUEUED,
3513 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3514 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3515 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3522 * bnx2x_set_pbd_gso - update PBD in GSO case.
3526 * @xmit_type: xmit flags
3528 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3529 struct eth_tx_parse_bd_e1x *pbd,
3532 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3533 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3534 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3536 if (xmit_type & XMIT_GSO_V4) {
3537 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3538 pbd->tcp_pseudo_csum =
3539 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3541 0, IPPROTO_TCP, 0));
3543 pbd->tcp_pseudo_csum =
3544 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3545 &ipv6_hdr(skb)->daddr,
3546 0, IPPROTO_TCP, 0));
3550 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
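/* Note: the pseudo checksum above is computed with a zero length field so
 * that the per-segment length can be added for every generated packet,
 * hence the PSEUDO_CS_WITHOUT_LEN flag.
 */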
3554 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3556 * @bp: driver handle
3558 * @parsing_data: data to be updated
3559 * @xmit_type: xmit flags
3561 * 57712/578xx related, when skb has encapsulation
3563 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3564 u32 *parsing_data, u32 xmit_type)
3567 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3568 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3569 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3571 if (xmit_type & XMIT_CSUM_TCP) {
3572 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3573 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3574 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3576 return skb_inner_transport_header(skb) +
3577 inner_tcp_hdrlen(skb) - skb->data;
3580 /* We support checksum offload for TCP and UDP only.
3581 * No need to pass the UDP header length - it's a constant.
3583 return skb_inner_transport_header(skb) +
3584 sizeof(struct udphdr) - skb->data;
3588 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3590 * @bp: driver handle
3592 * @parsing_data: data to be updated
3593 * @xmit_type: xmit flags
3595 * 57712/578xx related
3597 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3598 u32 *parsing_data, u32 xmit_type)
3601 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3602 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3603 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3605 if (xmit_type & XMIT_CSUM_TCP) {
3606 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3607 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3608 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3610 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3612 /* We support checksum offload for TCP and UDP only.
3613 * No need to pass the UDP header length - it's a constant.
3615 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3618 /* set FW indication according to inner or outer protocols if tunneled */
3619 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3620 struct eth_tx_start_bd *tx_start_bd,
3623 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3625 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3626 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3628 if (!(xmit_type & XMIT_CSUM_TCP))
3629 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3633 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3635 * @bp: driver handle
3637 * @pbd: parse BD to be updated
3638 * @xmit_type: xmit flags
3640 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3641 struct eth_tx_parse_bd_e1x *pbd,
3644 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3646 /* for now NS flag is not used in Linux */
3649 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3650 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3652 pbd->ip_hlen_w = (skb_transport_header(skb) -
3653 skb_network_header(skb)) >> 1;
3655 hlen += pbd->ip_hlen_w;
3657 /* We support checksum offload for TCP and UDP only */
3658 if (xmit_type & XMIT_CSUM_TCP)
3659 hlen += tcp_hdrlen(skb) / 2;
3661 hlen += sizeof(struct udphdr) / 2;
3663 pbd->total_hlen_w = cpu_to_le16(hlen);
3666 if (xmit_type & XMIT_CSUM_TCP) {
3667 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3670 s8 fix = SKB_CS_OFF(skb); /* signed! */
3672 DP(NETIF_MSG_TX_QUEUED,
3673 "hlen %d fix %d csum before fix %x\n",
3674 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3676 /* HW bug: fixup the CSUM */
3677 pbd->tcp_pseudo_csum =
3678 bnx2x_csum_fix(skb_transport_header(skb),
3681 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3682 pbd->tcp_pseudo_csum);
3688 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3689 struct eth_tx_parse_bd_e2 *pbd_e2,
3690 struct eth_tx_parse_2nd_bd *pbd2,
3695 u8 outerip_off, outerip_len = 0;
3697 /* from outer IP to transport */
3698 hlen_w = (skb_inner_transport_header(skb) -
3699 skb_network_header(skb)) >> 1;
3702 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3704 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3706 /* outer IP header info */
3707 if (xmit_type & XMIT_CSUM_V4) {
3708 struct iphdr *iph = ip_hdr(skb);
3709 u32 csum = (__force u32)(~iph->check) -
3710 (__force u32)iph->tot_len -
3711 (__force u32)iph->frag_off;
3713 outerip_len = iph->ihl << 1;
3715 pbd2->fw_ip_csum_wo_len_flags_frag =
3716 bswab16(csum_fold((__force __wsum)csum));
3718 pbd2->fw_ip_hdr_to_payload_w =
3719 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3720 pbd_e2->data.tunnel_data.flags |=
3721 ETH_TUNNEL_DATA_IPV6_OUTER;
3724 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3726 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3728 /* inner IP header info */
3729 if (xmit_type & XMIT_CSUM_ENC_V4) {
3730 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3732 pbd_e2->data.tunnel_data.pseudo_csum =
3733 bswab16(~csum_tcpudp_magic(
3734 inner_ip_hdr(skb)->saddr,
3735 inner_ip_hdr(skb)->daddr,
3736 0, IPPROTO_TCP, 0));
3738 pbd_e2->data.tunnel_data.pseudo_csum =
3739 bswab16(~csum_ipv6_magic(
3740 &inner_ipv6_hdr(skb)->saddr,
3741 &inner_ipv6_hdr(skb)->daddr,
3742 0, IPPROTO_TCP, 0));
3745 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3750 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3751 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3752 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3754 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3755 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3756 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3760 static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3763 struct ipv6hdr *ipv6;
3765 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3768 if (xmit_type & XMIT_GSO_ENC_V6)
3769 ipv6 = inner_ipv6_hdr(skb);
3770 else /* XMIT_GSO_V6 */
3771 ipv6 = ipv6_hdr(skb);
3773 if (ipv6->nexthdr == NEXTHDR_IPV6)
3774 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3777 /* called with netif_tx_lock
3778 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3779 * netif_wake_queue()
3781 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3783 struct bnx2x *bp = netdev_priv(dev);
3785 struct netdev_queue *txq;
3786 struct bnx2x_fp_txdata *txdata;
3787 struct sw_tx_bd *tx_buf;
3788 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3789 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3790 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3791 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3792 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3793 u32 pbd_e2_parsing_data = 0;
3794 u16 pkt_prod, bd_prod;
3797 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3800 __le16 pkt_size = 0;
3802 u8 mac_type = UNICAST_ADDRESS;
3804 #ifdef BNX2X_STOP_ON_ERROR
3805 if (unlikely(bp->panic))
3806 return NETDEV_TX_BUSY;
3809 txq_index = skb_get_queue_mapping(skb);
3810 txq = netdev_get_tx_queue(dev, txq_index);
3812 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3814 txdata = &bp->bnx2x_txq[txq_index];
3816 /* enable this debug print to view the transmission queue being used
3817 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3818 txq_index, fp_index, txdata_index); */
3820 /* enable this debug print to view the transmission details
3821 DP(NETIF_MSG_TX_QUEUED,
3822 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3823 txdata->cid, fp_index, txdata_index, txdata, fp); */
3825 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3826 skb_shinfo(skb)->nr_frags +
3828 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3829 /* Handle special storage cases separately */
3830 if (txdata->tx_ring_size == 0) {
3831 struct bnx2x_eth_q_stats *q_stats =
3832 bnx2x_fp_qstats(bp, txdata->parent_fp);
3833 q_stats->driver_filtered_tx_pkt++;
3835 return NETDEV_TX_OK;
3837 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3838 netif_tx_stop_queue(txq);
3839 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3841 return NETDEV_TX_BUSY;
3844 DP(NETIF_MSG_TX_QUEUED,
3845 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3846 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3847 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3850 eth = (struct ethhdr *)skb->data;
3852 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3853 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3854 if (is_broadcast_ether_addr(eth->h_dest))
3855 mac_type = BROADCAST_ADDRESS;
3857 mac_type = MULTICAST_ADDRESS;
3860 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3861 /* First, check if we need to linearize the skb (due to FW
3862 restrictions). No need to check fragmentation if page size > 8K
3863 (there will be no violation of FW restrictions) */
3864 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3865 /* Statistics of linearization */
3867 if (skb_linearize(skb) != 0) {
3868 DP(NETIF_MSG_TX_QUEUED,
3869 "SKB linearization failed - silently dropping this SKB\n");
3870 dev_kfree_skb_any(skb);
3871 return NETDEV_TX_OK;
3875 /* Map skb linear data for DMA */
3876 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3877 skb_headlen(skb), DMA_TO_DEVICE);
3878 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3879 DP(NETIF_MSG_TX_QUEUED,
3880 "SKB mapping failed - silently dropping this SKB\n");
3881 dev_kfree_skb_any(skb);
3882 return NETDEV_TX_OK;
3884 /*
3885 Please read carefully. First we use one BD which we mark as start,
3886 then we have a parsing info BD (used for TSO or xsum),
3887 and only then we have the rest of the TSO BDs.
3888 (don't forget to mark the last one as last,
3889 and to unmap only AFTER you write to the BD ...)
3890 And above all, all pbd sizes are in words - NOT DWORDS!
3891 */
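/* Illustrative BD chain for a tunneled TSO packet on a 57712/578xx (the
 * exact chain depends on the flags set below):
 *   [start BD][parse BD e2][2nd parse BD][split data BD][frag BD]...[last BD]
 */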
3893 /* get current pkt produced now - advance it just before sending packet
3894 * since mapping of pages may fail and cause packet to be dropped
3896 pkt_prod = txdata->tx_pkt_prod;
3897 bd_prod = TX_BD(txdata->tx_bd_prod);
3899 /* get a tx_buf and first BD
3900 * tx_start_bd may be changed during SPLIT,
3901 * but first_bd will always stay first
3903 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3904 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3905 first_bd = tx_start_bd;
3907 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3909 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3910 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3911 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3912 } else if (bp->ptp_tx_skb) {
3913 BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3915 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3916 /* schedule check for Tx timestamp */
3917 bp->ptp_tx_skb = skb_get(skb);
3918 bp->ptp_tx_start = jiffies;
3919 schedule_work(&bp->ptp_task);
3923 /* header nbd: indirectly zero other flags! */
3924 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3926 /* remember the first BD of the packet */
3927 tx_buf->first_bd = txdata->tx_bd_prod;
3931 DP(NETIF_MSG_TX_QUEUED,
3932 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3933 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3935 if (skb_vlan_tag_present(skb)) {
3936 tx_start_bd->vlan_or_ethertype =
3937 cpu_to_le16(skb_vlan_tag_get(skb));
3938 tx_start_bd->bd_flags.as_bitfield |=
3939 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3941 /* when transmitting in a vf, start bd must hold the ethertype
3942 * for fw to enforce it
3944 #ifndef BNX2X_STOP_ON_ERROR
3947 tx_start_bd->vlan_or_ethertype =
3948 cpu_to_le16(ntohs(eth->h_proto));
3949 #ifndef BNX2X_STOP_ON_ERROR
3951 /* used by FW for packet accounting */
3952 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3956 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3958 /* turn on parsing and get a BD */
3959 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3961 if (xmit_type & XMIT_CSUM)
3962 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3964 if (!CHIP_IS_E1x(bp)) {
3965 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3966 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3968 if (xmit_type & XMIT_CSUM_ENC) {
3969 u16 global_data = 0;
3971 /* Set PBD in enc checksum offload case */
3972 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3973 &pbd_e2_parsing_data,
3976 /* turn on 2nd parsing and get a BD */
3977 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3979 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3981 memset(pbd2, 0, sizeof(*pbd2));
3983 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3984 (skb_inner_network_header(skb) -
3987 if (xmit_type & XMIT_GSO_ENC)
3988 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3992 pbd2->global_data = cpu_to_le16(global_data);
3994 /* add an additional parse BD indication to the start BD */
3995 SET_FLAG(tx_start_bd->general_data,
3996 ETH_TX_START_BD_PARSE_NBDS, 1);
3997 /* set encapsulation flag in start BD */
3998 SET_FLAG(tx_start_bd->general_data,
3999 ETH_TX_START_BD_TUNNEL_EXIST, 1);
4001 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
4004 } else if (xmit_type & XMIT_CSUM) {
4005 /* Set PBD in checksum offload case w/o encapsulation */
4006 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
4007 &pbd_e2_parsing_data,
4011 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
4012 /* Add the macs to the parsing BD if this is a vf or if
4013 * Tx Switching is enabled.
4016 /* override GRE parameters in BD */
4017 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4018 &pbd_e2->data.mac_addr.src_mid,
4019 &pbd_e2->data.mac_addr.src_lo,
4022 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
4023 &pbd_e2->data.mac_addr.dst_mid,
4024 &pbd_e2->data.mac_addr.dst_lo,
4027 if (bp->flags & TX_SWITCHING)
4028 bnx2x_set_fw_mac_addr(
4029 &pbd_e2->data.mac_addr.dst_hi,
4030 &pbd_e2->data.mac_addr.dst_mid,
4031 &pbd_e2->data.mac_addr.dst_lo,
4033 #ifdef BNX2X_STOP_ON_ERROR
4034 /* Enforce security is always set in Stop on Error -
4035 * source mac should be present in the parsing BD
4037 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4038 &pbd_e2->data.mac_addr.src_mid,
4039 &pbd_e2->data.mac_addr.src_lo,
4044 SET_FLAG(pbd_e2_parsing_data,
4045 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4047 u16 global_data = 0;
4048 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4049 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4050 /* Set PBD in checksum offload case */
4051 if (xmit_type & XMIT_CSUM)
4052 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4054 SET_FLAG(global_data,
4055 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4056 pbd_e1x->global_data |= cpu_to_le16(global_data);
4059 /* Setup the data pointer of the first BD of the packet */
4060 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4061 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4062 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4063 pkt_size = tx_start_bd->nbytes;
4065 DP(NETIF_MSG_TX_QUEUED,
4066 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
4067 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4068 le16_to_cpu(tx_start_bd->nbytes),
4069 tx_start_bd->bd_flags.as_bitfield,
4070 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4072 if (xmit_type & XMIT_GSO) {
4074 DP(NETIF_MSG_TX_QUEUED,
4075 "TSO packet len %d hlen %d total len %d tso size %d\n",
4076 skb->len, hlen, skb_headlen(skb),
4077 skb_shinfo(skb)->gso_size);
4079 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4081 if (unlikely(skb_headlen(skb) > hlen)) {
4083 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4087 if (!CHIP_IS_E1x(bp))
4088 pbd_e2_parsing_data |=
4089 (skb_shinfo(skb)->gso_size <<
4090 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4091 ETH_TX_PARSE_BD_E2_LSO_MSS;
4093 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4096 /* Set the PBD's parsing_data field if not zero
4097 * (for the chips newer than 57711).
4099 if (pbd_e2_parsing_data)
4100 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4102 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4104 /* Handle fragmented skb */
4105 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4106 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4108 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4109 skb_frag_size(frag), DMA_TO_DEVICE);
4110 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4111 unsigned int pkts_compl = 0, bytes_compl = 0;
4113 DP(NETIF_MSG_TX_QUEUED,
4114 "Unable to map page - dropping packet...\n");
4116 /* we need to unmap all buffers already mapped
4117 * for this SKB;
4118 * first_bd->nbd needs to be properly updated
4119 * before the call to bnx2x_free_tx_pkt
4120 */
4121 first_bd->nbd = cpu_to_le16(nbd);
4122 bnx2x_free_tx_pkt(bp, txdata,
4123 TX_BD(txdata->tx_pkt_prod),
4124 &pkts_compl, &bytes_compl);
4125 return NETDEV_TX_OK;
4128 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4129 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4130 if (total_pkt_bd == NULL)
4131 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4133 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4134 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4135 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4136 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4139 DP(NETIF_MSG_TX_QUEUED,
4140 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4141 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4142 le16_to_cpu(tx_data_bd->nbytes));
4145 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4147 /* update with actual num BDs */
4148 first_bd->nbd = cpu_to_le16(nbd);
4150 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4152 /* now send a tx doorbell, counting the next BD
4153 * if the packet contains or ends with it
4155 if (TX_BD_POFF(bd_prod) < nbd)
4156 nbd++;
4158 /* total_pkt_bytes should be set on the first data BD if
4159 * it's not an LSO packet and there is more than one
4160 * data BD. In this case pkt_size is limited by an MTU value.
4161 * However we prefer to set it for an LSO packet (while we don't
4162 * have to) in order to save some CPU cycles in the non-LSO
4163 * case, which we care about much more.
4164 */
4165 if (total_pkt_bd != NULL)
4166 total_pkt_bd->total_pkt_bytes = pkt_size;
4169 DP(NETIF_MSG_TX_QUEUED,
4170 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4171 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4172 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4173 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4174 le16_to_cpu(pbd_e1x->total_hlen_w));
4176 DP(NETIF_MSG_TX_QUEUED,
4177 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
4179 pbd_e2->data.mac_addr.dst_hi,
4180 pbd_e2->data.mac_addr.dst_mid,
4181 pbd_e2->data.mac_addr.dst_lo,
4182 pbd_e2->data.mac_addr.src_hi,
4183 pbd_e2->data.mac_addr.src_mid,
4184 pbd_e2->data.mac_addr.src_lo,
4185 pbd_e2->parsing_data);
4186 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4188 netdev_tx_sent_queue(txq, skb->len);
4190 skb_tx_timestamp(skb);
4192 txdata->tx_pkt_prod++;
4194 * Make sure that the BD data is updated before updating the producer
4195 * since FW might read the BD right after the producer is updated.
4196 * This is only applicable for weak-ordered memory model archs such
4197 * as IA-64. The following barrier is also mandatory since FW will
4198 * assume packets must have BDs.
4199 */
4200 wmb();
4202 txdata->tx_db.data.prod += nbd;
4205 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4209 txdata->tx_bd_prod += nbd;
4211 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4212 netif_tx_stop_queue(txq);
4214 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4215 * ordering of set_bit() in netif_tx_stop_queue() and read of
4219 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4220 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4221 netif_tx_wake_queue(txq);
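/* The tx_avail re-check above closes the race with a concurrent
 * bnx2x_tx_int() that freed descriptors after our first check but before
 * the queue was marked stopped; without the re-check (and the paired
 * barriers) the queue could stay stopped forever.
 */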
4225 return NETDEV_TX_OK;
4228 void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4230 int mfw_vn = BP_FW_MB_IDX(bp);
4233 /* If the shmem shouldn't affect configuration, reflect */
4234 if (!IS_MF_BD(bp)) {
4237 for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4244 tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4245 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4246 c2s_map[0] = tmp & 0xff;
4247 c2s_map[1] = (tmp >> 8) & 0xff;
4248 c2s_map[2] = (tmp >> 16) & 0xff;
4249 c2s_map[3] = (tmp >> 24) & 0xff;
4251 tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4252 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4253 c2s_map[4] = tmp & 0xff;
4254 c2s_map[5] = (tmp >> 8) & 0xff;
4255 c2s_map[6] = (tmp >> 16) & 0xff;
4256 c2s_map[7] = (tmp >> 24) & 0xff;
4258 tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4259 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4260 *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
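/* Each shmem dword packs four per-priority CoS values, one byte each: the
 * "lower" dword covers priorities 0-3, the "upper" one 4-7, and the
 * "default" dword holds one byte per MF function, selected by mfw_vn.
 */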
4264 * bnx2x_setup_tc - routine to configure net_device for multi tc
4266 * @netdev: net device to configure
4267 * @tc: number of traffic classes to enable
4269 * callback connected to the ndo_setup_tc function pointer
4271 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4273 struct bnx2x *bp = netdev_priv(dev);
4274 u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4275 int cos, prio, count, offset;
4277 /* setup tc must be called under rtnl lock */
4280 /* no traffic classes requested. Aborting */
4282 netdev_reset_tc(dev);
4286 /* requested to support too many traffic classes */
4287 if (num_tc > bp->max_cos) {
4288 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4289 num_tc, bp->max_cos);
4293 /* declare amount of supported traffic classes */
4294 if (netdev_set_num_tc(dev, num_tc)) {
4295 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4299 bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4301 /* configure priority to traffic class mapping */
4302 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4303 int outer_prio = c2s_map[prio];
4305 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4306 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4307 "mapping priority %d to tc %d\n",
4308 outer_prio, bp->prio_to_cos[outer_prio]);
4311 /* Use this configuration to differentiate tc0 from other COSes
4312 This can be used for ets or pfc, and save the effort of setting
4313 up a multi-class queue disc or negotiating DCBX with a switch
4314 netdev_set_prio_tc_map(dev, 0, 0);
4315 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4316 for (prio = 1; prio < 16; prio++) {
4317 netdev_set_prio_tc_map(dev, prio, 1);
4318 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4321 /* configure traffic class to transmission queue mapping */
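/* Illustrative mapping (queue counts are hypothetical): with 2 CoS and
 * 4 ETH queues, tc0 is served by queues 0-3 and tc1 by queues 4-7
 * (offset = cos * BNX2X_NUM_NON_CNIC_QUEUES, count = BNX2X_NUM_ETH_QUEUES).
 */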
4322 for (cos = 0; cos < bp->max_cos; cos++) {
4323 count = BNX2X_NUM_ETH_QUEUES(bp);
4324 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4325 netdev_set_tc_queue(dev, cos, count, offset);
4326 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4327 "mapping tc %d to offset %d count %d\n",
4328 cos, offset, count);
4334 /* called with rtnl_lock */
4335 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4337 struct sockaddr *addr = p;
4338 struct bnx2x *bp = netdev_priv(dev);
4341 if (!is_valid_ether_addr(addr->sa_data)) {
4342 BNX2X_ERR("Requested MAC address is not valid\n");
4346 if (IS_MF_STORAGE_ONLY(bp)) {
4347 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4351 if (netif_running(dev)) {
4352 rc = bnx2x_set_eth_mac(bp, false);
4357 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4359 if (netif_running(dev))
4360 rc = bnx2x_set_eth_mac(bp, true);
4362 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4363 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4368 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4370 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4371 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4376 if (IS_FCOE_IDX(fp_index)) {
4377 memset(sb, 0, sizeof(union host_hc_status_block));
4378 fp->status_blk_mapping = 0;
4381 if (!CHIP_IS_E1x(bp))
4382 BNX2X_PCI_FREE(sb->e2_sb,
4383 bnx2x_fp(bp, fp_index,
4384 status_blk_mapping),
4385 sizeof(struct host_hc_status_block_e2));
4387 BNX2X_PCI_FREE(sb->e1x_sb,
4388 bnx2x_fp(bp, fp_index,
4389 status_blk_mapping),
4390 sizeof(struct host_hc_status_block_e1x));
4394 if (!skip_rx_queue(bp, fp_index)) {
4395 bnx2x_free_rx_bds(fp);
4397 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4398 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4399 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4400 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4401 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4403 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4404 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4405 sizeof(struct eth_fast_path_rx_cqe) *
4409 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4410 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4411 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4412 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4416 if (!skip_tx_queue(bp, fp_index)) {
4417 /* fastpath tx rings: tx_buf tx_desc */
4418 for_each_cos_in_tx_queue(fp, cos) {
4419 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4421 DP(NETIF_MSG_IFDOWN,
4422 "freeing tx memory of fp %d cos %d cid %d\n",
4423 fp_index, cos, txdata->cid);
4425 BNX2X_FREE(txdata->tx_buf_ring);
4426 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4427 txdata->tx_desc_mapping,
4428 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4431 /* end of fastpath */
4434 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4437 for_each_cnic_queue(bp, i)
4438 bnx2x_free_fp_mem_at(bp, i);
4441 void bnx2x_free_fp_mem(struct bnx2x *bp)
4444 for_each_eth_queue(bp, i)
4445 bnx2x_free_fp_mem_at(bp, i);
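/* Cache pointers to the status block's index_values and running_index
 * arrays, so the fast path can read them directly instead of going
 * through the chip-specific e2/e1x status block layout each time.
 */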
4448 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4450 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4451 if (!CHIP_IS_E1x(bp)) {
4452 bnx2x_fp(bp, index, sb_index_values) =
4453 (__le16 *)status_blk.e2_sb->sb.index_values;
4454 bnx2x_fp(bp, index, sb_running_index) =
4455 (__le16 *)status_blk.e2_sb->sb.running_index;
4457 bnx2x_fp(bp, index, sb_index_values) =
4458 (__le16 *)status_blk.e1x_sb->sb.index_values;
4459 bnx2x_fp(bp, index, sb_running_index) =
4460 (__le16 *)status_blk.e1x_sb->sb.running_index;
4464 /* Returns the number of actually allocated BDs */
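/* The caller (bnx2x_alloc_fp_mem_at()) compares the returned count with
 * the requested ring size and disables the queue if even the minimum
 * (TPA or non-TPA) amount could not be allocated.
 */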
4465 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4466 int rx_ring_size)
4468 struct bnx2x *bp = fp->bp;
4469 u16 ring_prod, cqe_ring_prod;
4470 int i, failure_cnt = 0;
4472 fp->rx_comp_cons = 0;
4473 cqe_ring_prod = ring_prod = 0;
4475 /* This routine is called only during fp init, so
4476 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4477 */
4478 for (i = 0; i < rx_ring_size; i++) {
4479 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4483 ring_prod = NEXT_RX_IDX(ring_prod);
4484 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4485 WARN_ON(ring_prod <= (i - failure_cnt));
4489 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4490 i - failure_cnt, fp->index);
4492 fp->rx_bd_prod = ring_prod;
4493 /* Limit the CQE producer by the CQE ring size */
4494 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4495 cqe_ring_prod);
4496 fp->rx_pkt = fp->rx_calls = 0;
4498 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4500 return i - failure_cnt;
4503 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
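/* The last CQE of each RCQ page is not a completion but a next-page
 * descriptor: point it at the DMA address of the following page,
 * wrapping back to the first page after the last one.
 */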
4507 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4508 struct eth_rx_cqe_next_page *nextpg;
4510 nextpg = (struct eth_rx_cqe_next_page *)
4511 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4512 nextpg->addr_hi =
4513 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4514 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4515 nextpg->addr_lo =
4516 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4517 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4521 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4523 union host_hc_status_block *sb;
4524 struct bnx2x_fastpath *fp = &bp->fp[index];
4527 int rx_ring_size = 0;
4529 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4530 rx_ring_size = MIN_RX_SIZE_NONTPA;
4531 bp->rx_ring_size = rx_ring_size;
4532 } else if (!bp->rx_ring_size) {
4533 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4535 if (CHIP_IS_E3(bp)) {
4536 u32 cfg = SHMEM_RD(bp,
4537 dev_info.port_hw_config[BP_PORT(bp)].
4538 default_cfg);
4540 /* Decrease ring size for 1G functions */
4541 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4542 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4543 rx_ring_size /= 10;
4546 /* allocate at least number of buffers required by FW */
4547 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4548 MIN_RX_SIZE_TPA, rx_ring_size);
4550 bp->rx_ring_size = rx_ring_size;
4551 } else /* if rx_ring_size specified - use it */
4552 rx_ring_size = bp->rx_ring_size;
4554 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4557 sb = &bnx2x_fp(bp, index, status_blk);
4559 if (!IS_FCOE_IDX(index)) {
4561 if (!CHIP_IS_E1x(bp)) {
4562 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4563 sizeof(struct host_hc_status_block_e2));
4567 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4568 sizeof(struct host_hc_status_block_e1x));
4574 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4575 * set shortcuts for it.
4577 if (!IS_FCOE_IDX(index))
4578 set_sb_shortcuts(bp, index);
4581 if (!skip_tx_queue(bp, index)) {
4582 /* fastpath tx rings: tx_buf tx_desc */
4583 for_each_cos_in_tx_queue(fp, cos) {
4584 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4586 DP(NETIF_MSG_IFUP,
4587 "allocating tx memory of fp %d cos %d\n",
4588 index, cos);
4590 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4591 sizeof(struct sw_tx_bd),
4592 GFP_KERNEL);
4593 if (!txdata->tx_buf_ring)
4595 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4596 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4597 if (!txdata->tx_desc_ring)
4603 if (!skip_rx_queue(bp, index)) {
4604 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4605 bnx2x_fp(bp, index, rx_buf_ring) =
4606 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4607 if (!bnx2x_fp(bp, index, rx_buf_ring))
4609 bnx2x_fp(bp, index, rx_desc_ring) =
4610 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4611 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4612 if (!bnx2x_fp(bp, index, rx_desc_ring))
4615 /* Seed all CQEs by 1s */
4616 bnx2x_fp(bp, index, rx_comp_ring) =
4617 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4618 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4619 if (!bnx2x_fp(bp, index, rx_comp_ring))
4623 bnx2x_fp(bp, index, rx_page_ring) =
4624 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4625 GFP_KERNEL);
4626 if (!bnx2x_fp(bp, index, rx_page_ring))
4628 bnx2x_fp(bp, index, rx_sge_ring) =
4629 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4630 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4631 if (!bnx2x_fp(bp, index, rx_sge_ring))
4634 bnx2x_set_next_page_rx_bd(fp);
4637 bnx2x_set_next_page_rx_cq(fp);
4640 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4641 if (ring_size < rx_ring_size)
4647 /* handles low memory cases */
4649 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4651 /* FW will drop all packets if the queue is not big enough,
4652 * so in that case we disable the queue entirely.
4653 * The minimum size differs for OOO, TPA and non-TPA queues.
4654 */
4655 if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4656 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4657 /* release memory allocated for this queue */
4658 bnx2x_free_fp_mem_at(bp, index);
4664 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4668 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4669 /* we will fail the load process instead of marking
4670 * the rx queue as disabled
4671 */
4677 static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4681 /* 1. Allocate FP for leading - fatal if error
4682 * 2. Allocate RSS - fix number of queues if error
4686 if (bnx2x_alloc_fp_mem_at(bp, 0))
4690 for_each_nondefault_eth_queue(bp, i)
4691 if (bnx2x_alloc_fp_mem_at(bp, i))
4694 /* handle memory failures */
4695 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4696 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4699 bnx2x_shrink_eth_fp(bp, delta);
4700 if (CNIC_SUPPORT(bp))
4701 /* move non-eth FPs next to the last eth FP;
4702 * this must be done in this order:
4703 * FCOE_IDX < FWD_IDX < OOO_IDX
4704 */
4706 /* move the FCoE fp even if NO_FCOE_FLAG is on */
4707 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4708 bp->num_ethernet_queues -= delta;
4709 bp->num_queues = bp->num_ethernet_queues +
4710 bp->num_cnic_queues;
4711 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4712 bp->num_queues + delta, bp->num_queues);
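/* e.g. (illustrative): if the allocation failed at queue 6 of 8 ETH
 * queues, delta is 2, the eth fp list is shrunk by 2 and the FCoE fp
 * slides down by 2 so that the fp array stays contiguous.
 */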
4718 void bnx2x_free_mem_bp(struct bnx2x *bp)
4722 for (i = 0; i < bp->fp_array_size; i++)
4723 kfree(bp->fp[i].tpa_info);
4726 kfree(bp->fp_stats);
4727 kfree(bp->bnx2x_txq);
4728 kfree(bp->msix_table);
4732 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4734 struct bnx2x_fastpath *fp;
4735 struct msix_entry *tbl;
4736 struct bnx2x_ilt *ilt;
4737 int msix_table_size = 0;
4738 int fp_array_size, txq_array_size;
4741 /*
4742 * The biggest MSI-X table we might need is the maximum number of fast
4743 * path IGU SBs plus the default SB (for PF only).
4744 */
4745 msix_table_size = bp->igu_sb_cnt;
4746 if (IS_PF(bp))
4747 msix_table_size++;
4748 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4750 /* fp array: RSS plus CNIC related L2 queues */
4751 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4752 bp->fp_array_size = fp_array_size;
4753 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4755 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4758 for (i = 0; i < bp->fp_array_size; i++) {
4759 fp[i].tpa_info =
4760 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4761 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4762 if (!(fp[i].tpa_info))
4768 /* allocate sp objs */
4769 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4770 GFP_KERNEL);
4774 /* allocate fp_stats */
4775 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4776 GFP_KERNEL);
4780 /* Allocate memory for the transmission queues array */
4781 txq_array_size =
4782 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4783 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4785 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4786 GFP_KERNEL);
4791 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4794 bp->msix_table = tbl;
4797 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
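/* Any allocation failure above lands here: release whatever has been
 * allocated so far and fail with -ENOMEM.
 */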
4804 bnx2x_free_mem_bp(bp);
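/* Unload and reload the NIC under rtnl_lock to apply settings that only
 * take effect at load time (e.g. MTU or feature changes); this is a
 * no-op while the interface is down.
 */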
4808 int bnx2x_reload_if_running(struct net_device *dev)
4810 struct bnx2x *bp = netdev_priv(dev);
4812 if (unlikely(!netif_running(dev)))
4815 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4816 return bnx2x_nic_load(bp, LOAD_NORMAL);
4819 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4821 u32 sel_phy_idx = 0;
4822 if (bp->link_params.num_phys <= 1)
4825 if (bp->link_vars.link_up) {
4826 sel_phy_idx = EXT_PHY1;
4827 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4828 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4829 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4830 sel_phy_idx = EXT_PHY2;
4833 switch (bnx2x_phy_selection(&bp->link_params)) {
4834 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4835 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4836 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4837 sel_phy_idx = EXT_PHY1;
4839 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4840 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4841 sel_phy_idx = EXT_PHY2;
4848 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4850 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4851 /*
4852 * The selected activated PHY is always after swapping (in case PHY
4853 * swapping is enabled). So when swapping is enabled, we need to reverse
4854 * the configuration.
4855 */
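/* e.g. (illustrative): if the active PHY is reported as EXT_PHY1 but the
 * PHYs are swapped, the matching configuration entry is the one stored
 * for EXT_PHY2, and vice versa.
 */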
4857 if (bp->link_params.multi_phy_config &
4858 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4859 if (sel_phy_idx == EXT_PHY1)
4860 sel_phy_idx = EXT_PHY2;
4861 else if (sel_phy_idx == EXT_PHY2)
4862 sel_phy_idx = EXT_PHY1;
4864 return LINK_CONFIG_IDX(sel_phy_idx);
4867 #ifdef NETDEV_FCOE_WWNN
4868 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4870 struct bnx2x *bp = netdev_priv(dev);
4871 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4874 case NETDEV_FCOE_WWNN:
4875 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4876 cp->fcoe_wwn_node_name_lo);
4878 case NETDEV_FCOE_WWPN:
4879 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4880 cp->fcoe_wwn_port_name_lo);
4883 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4891 /* called with rtnl_lock */
4892 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4894 struct bnx2x *bp = netdev_priv(dev);
4896 if (pci_num_vf(bp->pdev)) {
4897 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4901 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4902 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4906 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4907 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4908 BNX2X_ERR("Can't support requested MTU size\n");
4912 /* This does not race with packet allocation
4913 * because the actual alloc size is
4914 * only updated as part of load
4915 */
4916 dev->mtu = new_mtu;
4918 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4919 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4921 return bnx2x_reload_if_running(dev);
4924 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4925 netdev_features_t features)
4927 struct bnx2x *bp = netdev_priv(dev);
4929 if (pci_num_vf(bp->pdev)) {
4930 netdev_features_t changed = dev->features ^ features;
4932 /* Revert the requested changes in features if they
4933 * would require internal reload of PF in bnx2x_set_features().
4935 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4936 features &= ~NETIF_F_RXCSUM;
4937 features |= dev->features & NETIF_F_RXCSUM;
4940 if (changed & NETIF_F_LOOPBACK) {
4941 features &= ~NETIF_F_LOOPBACK;
4942 features |= dev->features & NETIF_F_LOOPBACK;
4946 /* TPA requires Rx CSUM offloading */
4947 if (!(features & NETIF_F_RXCSUM)) {
4948 features &= ~NETIF_F_LRO;
4949 features &= ~NETIF_F_GRO;
4955 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4957 struct bnx2x *bp = netdev_priv(dev);
4958 netdev_features_t changes = features ^ dev->features;
4959 bool bnx2x_reload = false;
4962 /* VFs or non SRIOV PFs should be able to change loopback feature */
4963 if (!pci_num_vf(bp->pdev)) {
4964 if (features & NETIF_F_LOOPBACK) {
4965 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4966 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4967 bnx2x_reload = true;
4970 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4971 bp->link_params.loopback_mode = LOOPBACK_NONE;
4972 bnx2x_reload = true;
4977 /* if GRO is changed while LRO is enabled, don't force a reload */
4978 if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
4979 changes &= ~NETIF_F_GRO;
4981 /* if GRO is changed while HW TPA is off, don't force a reload */
4982 if ((changes & NETIF_F_GRO) && bp->disable_tpa)
4983 changes &= ~NETIF_F_GRO;
4986 bnx2x_reload = true;
4989 if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4990 dev->features = features;
4991 rc = bnx2x_reload_if_running(dev);
4994 /* else: bnx2x_nic_load() will be called at end of recovery */
5000 void bnx2x_tx_timeout(struct net_device *dev)
5002 struct bnx2x *bp = netdev_priv(dev);
5004 #ifdef BNX2X_STOP_ON_ERROR
5009 /* This allows the netif to be shutdown gracefully before resetting */
5010 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
5013 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
5015 struct net_device *dev = pci_get_drvdata(pdev);
5019 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5022 bp = netdev_priv(dev);
5026 pci_save_state(pdev);
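/* If the interface is down there is nothing to quiesce; otherwise detach
 * the netdev so the stack stops using the device before the unload and
 * the power-state change below.
 */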
5028 if (!netif_running(dev)) {
5033 netif_device_detach(dev);
5035 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
5037 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
5044 int bnx2x_resume(struct pci_dev *pdev)
5046 struct net_device *dev = pci_get_drvdata(pdev);
5051 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5054 bp = netdev_priv(dev);
5056 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
5057 BNX2X_ERR("Handling parity error recovery. Try again later\n");
5063 pci_restore_state(pdev);
5065 if (!netif_running(dev)) {
5070 bnx2x_set_power_state(bp, PCI_D0);
5071 netif_device_attach(dev);
5073 rc = bnx2x_nic_load(bp, LOAD_OPEN);
5080 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5081 u32 cid)
5083 if (!cxt) {
5084 BNX2X_ERR("bad context pointer %p\n", cxt);
5085 return;
5088 /* ustorm cxt validation */
5089 cxt->ustorm_ag_context.cdu_usage =
5090 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5091 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5092 /* xcontext validation */
5093 cxt->xstorm_ag_context.cdu_reserved =
5094 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5095 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5098 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5099 u8 fw_sb_id, u8 sb_index,
5100 u8 ticks)
5102 u32 addr = BAR_CSTRORM_INTMEM +
5103 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5104 REG_WR8(bp, addr, ticks);
5106 "port %x fw_sb_id %d sb_index %d ticks %d\n",
5107 port, fw_sb_id, sb_index, ticks);
5110 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5111 u16 fw_sb_id, u8 sb_index,
5112 u8 disable)
5114 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5115 u32 addr = BAR_CSTRORM_INTMEM +
5116 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
5117 u8 flags = REG_RD8(bp, addr);
5119 flags &= ~HC_INDEX_DATA_HC_ENABLED;
5120 flags |= enable_flag;
5121 REG_WR8(bp, addr, flags);
5123 "port %x fw_sb_id %d sb_index %d disable %d\n",
5124 port, fw_sb_id, sb_index, disable);
5127 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5128 u8 sb_index, u8 disable, u16 usec)
5130 int port = BP_PORT(bp);
5131 u8 ticks = usec / BNX2X_BTR;
5133 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
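/* A zero 'usec' value is treated as a request to disable coalescing on
 * this status block index, in addition to an explicit 'disable'.
 */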
5135 disable = disable ? 1 : (usec ? 0 : 1);
5136 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5139 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5140 u32 verbose)
5142 smp_mb__before_atomic();
5143 set_bit(flag, &bp->sp_rtnl_state);
5144 smp_mb__after_atomic();
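/* The barriers around set_bit() ensure the flag is visible before the
 * sp_rtnl task, possibly running on another CPU, reads sp_rtnl_state.
 */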
5145 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5146 flag);
5147 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5149 EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);