1 /* bnx2x_cmn.c: QLogic Everest network driver.
3 * Copyright (c) 2007-2013 Broadcom Corporation
4 * Copyright (c) 2014 QLogic Corporation
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
11 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12 * Written by: Eliezer Tamir
13 * Based on code from Michael Chan's bnx2 driver
14 * UDP CSUM errata workaround by Arik Gendelman
15 * Slowpath and fastpath rework by Vladislav Zolotarov
16 * Statistics and Link management by Yitchak Gertner
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 #include <linux/etherdevice.h>
23 #include <linux/if_vlan.h>
24 #include <linux/interrupt.h>
26 #include <linux/crash_dump.h>
29 #include <net/ip6_checksum.h>
30 #include <net/busy_poll.h>
31 #include <linux/prefetch.h>
32 #include "bnx2x_cmn.h"
33 #include "bnx2x_init.h"
36 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
37 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
38 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
39 static int bnx2x_poll(struct napi_struct *napi, int budget);
41 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
45 /* Add NAPI objects */
46 for_each_rx_queue_cnic(bp, i) {
47 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
48 bnx2x_poll, NAPI_POLL_WEIGHT);
49 napi_hash_add(&bnx2x_fp(bp, i, napi));
53 static void bnx2x_add_all_napi(struct bnx2x *bp)
57 /* Add NAPI objects */
58 for_each_eth_queue(bp, i) {
59 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
60 bnx2x_poll, NAPI_POLL_WEIGHT);
61 napi_hash_add(&bnx2x_fp(bp, i, napi));
65 static int bnx2x_calc_num_queues(struct bnx2x *bp)
67 int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
69 /* Reduce memory usage in kdump environment by using only one queue */
70 if (is_kdump_kernel())
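nq = 1;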
73 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
78 * bnx2x_move_fp - move content of the fastpath structure.
81 * @from: source FP index
82 * @to: destination FP index
84 * Makes sure the contents of the bp->fp[to].napi are kept
85 * intact. This is done by first copying the napi struct from
86 * the target to the source, and then mem copying the entire
87 * source onto the target. Update txdata pointers and related
90 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
92 struct bnx2x_fastpath *from_fp = &bp->fp[from];
93 struct bnx2x_fastpath *to_fp = &bp->fp[to];
94 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
95 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
96 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
97 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
98 int old_max_eth_txqs, new_max_eth_txqs;
99 int old_txdata_index = 0, new_txdata_index = 0;
100 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
102 /* Copy the NAPI object as it has been already initialized */
103 from_fp->napi = to_fp->napi;
105 /* Move bnx2x_fastpath contents */
106 memcpy(to_fp, from_fp, sizeof(*to_fp));
109 /* Retain the tpa_info of the original `to' version as we don't want
110 * 2 FPs to contain the same tpa_info pointer.
112 to_fp->tpa_info = old_tpa_info;
114 /* move sp_objs contents as well, as their indices match fp ones */
115 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
117 /* move fp_stats contents as well, as their indices match fp ones */
118 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
120 /* Update txdata pointers in fp and move txdata content accordingly:
121 * Each fp consumes 'max_cos' txdata structures, so the index should be
122 * decremented by max_cos x delta.
125 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
126 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
128 if (from == FCOE_IDX(bp)) {
129 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
130 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
133 memcpy(&bp->bnx2x_txq[new_txdata_index],
134 &bp->bnx2x_txq[old_txdata_index],
135 sizeof(struct bnx2x_fp_txdata));
136 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
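/* Worked example (hypothetical values, not from the original source):
 * with 8 ETH queues, max_cos = 3 and the FCoE fp moving from index 8
 * to index 6 (delta = 2), the txdata index drops from
 * 8 * 3 + FCOE_TXQ_IDX_OFFSET to 6 * 3 + FCOE_TXQ_IDX_OFFSET, i.e. by
 * exactly max_cos * delta = 6 slots, as described above.
 */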
140 * bnx2x_fill_fw_str - Fill buffer with FW version string.
143 * @buf: character buffer to fill with the fw name
144 * @buf_len: length of the above buffer
147 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
150 u8 phy_fw_ver[PHY_FW_VER_LEN];
152 phy_fw_ver[0] = '\0';
153 bnx2x_get_ext_phy_fw_version(&bp->link_params,
154 phy_fw_ver, PHY_FW_VER_LEN);
155 strlcpy(buf, bp->fw_ver, buf_len);
156 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
158 (bp->common.bc_ver & 0xff0000) >> 16,
159 (bp->common.bc_ver & 0xff00) >> 8,
160 (bp->common.bc_ver & 0xff),
161 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
163 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
168 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
171 * @delta: number of eth queues which were not allocated
173 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
175 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
177 /* Queue pointer cannot be re-set on an fp-basis, as moving the pointer
178 * backward along the array could cause memory to be overwritten
180 for (cos = 1; cos < bp->max_cos; cos++) {
181 for (i = 0; i < old_eth_num - delta; i++) {
182 struct bnx2x_fastpath *fp = &bp->fp[i];
183 int new_idx = cos * (old_eth_num - delta) + i;
185 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
186 sizeof(struct bnx2x_fp_txdata));
187 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
192 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
194 /* free skb in the packet ring at pos idx
195 * return idx of last bd freed
197 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
198 u16 idx, unsigned int *pkts_compl,
199 unsigned int *bytes_compl)
201 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
202 struct eth_tx_start_bd *tx_start_bd;
203 struct eth_tx_bd *tx_data_bd;
204 struct sk_buff *skb = tx_buf->skb;
205 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
207 u16 split_bd_len = 0;
209 /* prefetch skb end pointer to speed up dev_kfree_skb() */
212 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
213 txdata->txq_index, idx, tx_buf, skb);
215 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
217 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
218 #ifdef BNX2X_STOP_ON_ERROR
219 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
220 BNX2X_ERR("BAD nbd!\n");
224 new_cons = nbd + tx_buf->first_bd;
226 /* Get the next bd */
227 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
229 /* Skip a parse bd... */
231 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
233 if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
234 /* Skip second parse bd... */
236 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
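/* bd_idx now points past the start BD and the parse BD(s); the BDs that
 * remain in the chain are data BDs, which are unmapped below.
 */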
239 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
240 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
241 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
242 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
244 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
248 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
249 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
255 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
256 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
257 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
259 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
266 (*bytes_compl) += skb->len;
267 dev_kfree_skb_any(skb);
270 tx_buf->first_bd = 0;
276 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
278 struct netdev_queue *txq;
279 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
280 unsigned int pkts_compl = 0, bytes_compl = 0;
282 #ifdef BNX2X_STOP_ON_ERROR
283 if (unlikely(bp->panic))
287 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
288 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
289 sw_cons = txdata->tx_pkt_cons;
291 /* Ensure subsequent loads occur after hw_cons */
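smp_rmb();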
294 while (sw_cons != hw_cons) {
297 pkt_cons = TX_BD(sw_cons);
299 DP(NETIF_MSG_TX_DONE,
300 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
301 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
303 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
304 &pkts_compl, &bytes_compl);
309 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
311 txdata->tx_pkt_cons = sw_cons;
312 txdata->tx_bd_cons = bd_cons;
314 /* Need to make the tx_bd_cons update visible to start_xmit()
315 * before checking for netif_tx_queue_stopped(). Without the
316 * memory barrier, there is a small possibility that
317 * start_xmit() will miss it and cause the queue to be stopped
319 * On the other hand we need an rmb() here to ensure the proper
320 * ordering of bit testing in the following
321 * netif_tx_queue_stopped(txq) call.
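*/
smp_mb();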
325 if (unlikely(netif_tx_queue_stopped(txq))) {
326 /* Taking tx_lock() is needed to prevent re-enabling the queue
327 * while it's empty. This could have happened if rx_action() gets
328 * suspended in bnx2x_tx_int() after the condition before
329 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
331 * stops the queue->sees fresh tx_bd_cons->releases the queue->
332 * sends some packets consuming the whole queue again->
336 __netif_tx_lock(txq, smp_processor_id());
338 if ((netif_tx_queue_stopped(txq)) &&
339 (bp->state == BNX2X_STATE_OPEN) &&
340 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
341 netif_tx_wake_queue(txq);
343 __netif_tx_unlock(txq);
348 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
351 u16 last_max = fp->last_max_sge;
353 if (SUB_S16(idx, last_max) > 0)
354 fp->last_max_sge = idx;
357 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
359 struct eth_end_agg_rx_cqe *cqe)
361 struct bnx2x *bp = fp->bp;
362 u16 last_max, last_elem, first_elem;
369 /* First mark all used pages */
370 for (i = 0; i < sge_len; i++)
371 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
372 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
374 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
375 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
377 /* Here we assume that the last SGE index is the biggest */
378 prefetch((void *)(fp->sge_mask));
379 bnx2x_update_last_max_sge(fp,
380 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
382 last_max = RX_SGE(fp->last_max_sge);
383 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
384 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
386 /* If ring is not full */
387 if (last_elem + 1 != first_elem)
390 /* Now update the prod */
391 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
392 if (likely(fp->sge_mask[i]))
395 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
396 delta += BIT_VEC64_ELEM_SZ;
400 fp->rx_sge_prod += delta;
401 /* clear page-end entries */
402 bnx2x_clear_sge_mask_next_elems(fp);
405 DP(NETIF_MSG_RX_STATUS,
406 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
407 fp->last_max_sge, fp->rx_sge_prod);
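/* Reading aid (not from the original source): sge_mask holds one bit per
 * SGE, BIT_VEC64_ELEM_SZ (64) of them per element. Bits are cleared as
 * pages are consumed; the producer above only advances across elements
 * that are fully cleared, re-arming each one to all-ones as it goes.
 */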
410 /* Get the Toeplitz hash value for the skb using the value from the
411 * CQE (calculated by HW).
413 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
414 const struct eth_fast_path_rx_cqe *cqe,
415 enum pkt_hash_types *rxhash_type)
417 /* Get Toeplitz hash from CQE */
418 if ((bp->dev->features & NETIF_F_RXHASH) &&
419 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
420 enum eth_rss_hash_type htype;
422 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
423 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
424 (htype == TCP_IPV6_HASH_TYPE)) ?
425 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
427 return le32_to_cpu(cqe->rss_hash_result);
429 *rxhash_type = PKT_HASH_TYPE_NONE;
433 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
435 struct eth_fast_path_rx_cqe *cqe)
437 struct bnx2x *bp = fp->bp;
438 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
439 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
440 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
442 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
443 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
445 /* print error if current state != stop */
446 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
447 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
449 /* Try to map an empty data buffer from the aggregation info */
450 mapping = dma_map_single(&bp->pdev->dev,
451 first_buf->data + NET_SKB_PAD,
452 fp->rx_buf_size, DMA_FROM_DEVICE);
454 * ...if it fails - move the skb from the consumer to the producer
455 * and set the current aggregation state as ERROR to drop it
456 * when TPA_STOP arrives.
459 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
460 /* Move the BD from the consumer to the producer */
461 bnx2x_reuse_rx_data(fp, cons, prod);
462 tpa_info->tpa_state = BNX2X_TPA_ERROR;
466 /* move empty data from pool to prod */
467 prod_rx_buf->data = first_buf->data;
468 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
469 /* point prod_bd to new data */
470 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
471 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
473 /* move partial skb from cons to pool (don't unmap yet) */
474 *first_buf = *cons_rx_buf;
476 /* mark bin state as START */
477 tpa_info->parsing_flags =
478 le16_to_cpu(cqe->pars_flags.flags);
479 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
480 tpa_info->tpa_state = BNX2X_TPA_START;
481 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
482 tpa_info->placement_offset = cqe->placement_offset;
483 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
484 if (fp->mode == TPA_MODE_GRO) {
485 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
486 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
487 tpa_info->gro_size = gro_size;
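/* The integer division rounds full_page down to a whole number of
 * MSS-sized chunks per SGE buffer; e.g. (hypothetical sizes) with
 * SGE_PAGES = 4096 and gro_size = 1400 this gives full_page = 2800.
 */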
490 #ifdef BNX2X_STOP_ON_ERROR
491 fp->tpa_queue_used |= (1 << queue);
492 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
497 /* Timestamp option length allowed for TPA aggregation:
499 * nop nop kind length echo val
501 #define TPA_TSTAMP_OPT_LEN 12
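/* 1 (nop) + 1 (nop) + 1 (kind) + 1 (length) + 4 (TS value) +
 * 4 (TS echo reply) = 12 bytes, the canonical TCP timestamp option layout.
 */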
503 * bnx2x_set_gro_params - compute GRO values
506 * @parsing_flags: parsing flags from the START CQE
507 * @len_on_bd: total length of the first packet for the aggregation
509 * @pkt_len: length of all segments
511 * Approximates the MSS for this aggregation using its first packet.
513 * Computes the number of aggregated segments and the gso_type.
515 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
516 u16 len_on_bd, unsigned int pkt_len,
517 u16 num_of_coalesced_segs)
519 /* TPA aggregation won't have either IP options or TCP options
520 * other than timestamp or IPv6 extension headers.
522 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
524 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
525 PRS_FLAG_OVERETH_IPV6) {
526 hdrs_len += sizeof(struct ipv6hdr);
527 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
529 hdrs_len += sizeof(struct iphdr);
530 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
533 /* Check if there was a TCP timestamp; if there was, it will
534 * always be 12 bytes long: nop nop kind length echo val.
536 * Otherwise FW would close the aggregation.
538 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
539 hdrs_len += TPA_TSTAMP_OPT_LEN;
541 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
543 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
544 * to skb_shinfo(skb)->gso_segs
546 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
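/* Example (assuming IPv4, no timestamps): hdrs_len is 14 (ETH_HLEN) +
 * 20 (iphdr) + 20 (tcphdr) = 54, so a 1514-byte first packet yields
 * gso_size = 1460, the usual Ethernet TCP MSS.
 */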
549 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
550 u16 index, gfp_t gfp_mask)
552 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
553 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
554 struct bnx2x_alloc_pool *pool = &fp->page_pool;
557 if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
559 /* put page reference used by the memory pool, since we
560 * won't be using this page as the mempool anymore.
563 put_page(pool->page);
565 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
566 if (unlikely(!pool->page)) {
567 BNX2X_ERR("Can't alloc sge\n");
574 mapping = dma_map_page(&bp->pdev->dev, pool->page,
575 pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
576 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
577 BNX2X_ERR("Can't map sge\n");
581 get_page(pool->page);
582 sw_buf->page = pool->page;
583 sw_buf->offset = pool->offset;
585 dma_unmap_addr_set(sw_buf, mapping, mapping);
587 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
588 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
590 pool->offset += SGE_PAGE_SIZE;
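/* The pool hands out SGE_PAGE_SIZE slices of one (possibly compound)
 * page; each slice takes its own page reference (get_page() above), so
 * the page is only freed once every slice has been unmapped and released.
 */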
595 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
596 struct bnx2x_agg_info *tpa_info,
599 struct eth_end_agg_rx_cqe *cqe,
602 struct sw_rx_page *rx_pg, old_rx_pg;
603 u32 i, frag_len, frag_size;
604 int err, j, frag_id = 0;
605 u16 len_on_bd = tpa_info->len_on_bd;
606 u16 full_page = 0, gro_size = 0;
608 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
610 if (fp->mode == TPA_MODE_GRO) {
611 gro_size = tpa_info->gro_size;
612 full_page = tpa_info->full_page;
615 /* This is needed in order to enable forwarding support */
617 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
618 le16_to_cpu(cqe->pkt_len),
619 le16_to_cpu(cqe->num_of_coalesced_segs));
621 #ifdef BNX2X_STOP_ON_ERROR
622 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
623 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
625 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
631 /* Run through the SGL and compose the fragmented skb */
632 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
633 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
635 /* FW gives the indices of the SGE as if the ring is an array
636 (meaning that "next" element will consume 2 indices) */
637 if (fp->mode == TPA_MODE_GRO)
638 frag_len = min_t(u32, frag_size, (u32)full_page);
640 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
642 rx_pg = &fp->rx_page_ring[sge_idx];
645 /* If we fail to allocate a substitute page, we simply stop
646 where we are and drop the whole packet */
647 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
649 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
653 dma_unmap_page(&bp->pdev->dev,
654 dma_unmap_addr(&old_rx_pg, mapping),
655 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
656 /* Add one frag and update the appropriate fields in the skb */
657 if (fp->mode == TPA_MODE_LRO)
658 skb_fill_page_desc(skb, j, old_rx_pg.page,
659 old_rx_pg.offset, frag_len);
663 for (rem = frag_len; rem > 0; rem -= gro_size) {
664 int len = rem > gro_size ? gro_size : rem;
665 skb_fill_page_desc(skb, frag_id++,
667 old_rx_pg.offset + offset,
670 get_page(old_rx_pg.page);
675 skb->data_len += frag_len;
676 skb->truesize += SGE_PAGES;
677 skb->len += frag_len;
679 frag_size -= frag_len;
685 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
687 if (fp->rx_frag_size)
693 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
695 if (fp->rx_frag_size) {
696 /* GFP_KERNEL allocations are used only during initialization */
697 if (unlikely(gfpflags_allow_blocking(gfp_mask)))
698 return (void *)__get_free_page(gfp_mask);
700 return netdev_alloc_frag(fp->rx_frag_size);
703 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
707 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
709 const struct iphdr *iph = ip_hdr(skb);
712 skb_set_transport_header(skb, sizeof(struct iphdr));
715 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
716 iph->saddr, iph->daddr, 0);
719 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
721 struct ipv6hdr *iph = ipv6_hdr(skb);
724 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
727 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
728 &iph->saddr, &iph->daddr, 0);
731 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
732 void (*gro_func)(struct bnx2x*, struct sk_buff*))
734 skb_set_network_header(skb, 0);
736 tcp_gro_complete(skb);
740 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
744 if (skb_shinfo(skb)->gso_size) {
745 switch (be16_to_cpu(skb->protocol)) {
747 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
750 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
753 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
754 be16_to_cpu(skb->protocol));
758 skb_record_rx_queue(skb, fp->rx_queue);
759 napi_gro_receive(&fp->napi, skb);
762 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
763 struct bnx2x_agg_info *tpa_info,
765 struct eth_end_agg_rx_cqe *cqe,
768 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
769 u8 pad = tpa_info->placement_offset;
770 u16 len = tpa_info->len_on_bd;
771 struct sk_buff *skb = NULL;
772 u8 *new_data, *data = rx_buf->data;
773 u8 old_tpa_state = tpa_info->tpa_state;
775 tpa_info->tpa_state = BNX2X_TPA_STOP;
777 /* If there was an error during the handling of the TPA_START -
778 * drop this aggregation.
780 if (old_tpa_state == BNX2X_TPA_ERROR)
783 /* Try to allocate the new data */
784 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
785 /* Unmap skb in the pool anyway, as we are going to change
786 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails.
788 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
789 fp->rx_buf_size, DMA_FROM_DEVICE);
790 if (likely(new_data))
791 skb = build_skb(data, fp->rx_frag_size);
794 #ifdef BNX2X_STOP_ON_ERROR
795 if (pad + len > fp->rx_buf_size) {
796 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
797 pad, len, fp->rx_buf_size);
803 skb_reserve(skb, pad + NET_SKB_PAD);
805 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
807 skb->protocol = eth_type_trans(skb, bp->dev);
808 skb->ip_summed = CHECKSUM_UNNECESSARY;
810 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
811 skb, cqe, cqe_idx)) {
812 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
813 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
814 bnx2x_gro_receive(bp, fp, skb);
816 DP(NETIF_MSG_RX_STATUS,
817 "Failed to allocate new pages - dropping packet!\n");
818 dev_kfree_skb_any(skb);
821 /* put new data in bin */
822 rx_buf->data = new_data;
827 bnx2x_frag_free(fp, new_data);
829 /* drop the packet and keep the buffer in the bin */
830 DP(NETIF_MSG_RX_STATUS,
831 "Failed to allocate or map a new skb - dropping packet!\n");
832 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
835 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
836 u16 index, gfp_t gfp_mask)
839 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
840 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
843 data = bnx2x_frag_alloc(fp, gfp_mask);
844 if (unlikely(data == NULL))
847 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
850 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
851 bnx2x_frag_free(fp, data);
852 BNX2X_ERR("Can't map rx data\n");
857 dma_unmap_addr_set(rx_buf, mapping, mapping);
859 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
860 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
866 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
867 struct bnx2x_fastpath *fp,
868 struct bnx2x_eth_q_stats *qstats)
870 /* Do nothing if no L4 csum validation was done.
871 * We do not check whether IP csum was validated. For IPv4 we assume
872 * that if the card got as far as validating the L4 csum, it also
873 * validated the IP csum. IPv6 has no IP csum.
875 if (cqe->fast_path_cqe.status_flags &
876 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
879 /* If L4 validation was done, check if an error was found. */
881 if (cqe->fast_path_cqe.type_error_flags &
882 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
883 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
884 qstats->hw_csum_err++;
886 skb->ip_summed = CHECKSUM_UNNECESSARY;
889 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
891 struct bnx2x *bp = fp->bp;
892 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
893 u16 sw_comp_cons, sw_comp_prod;
895 union eth_rx_cqe *cqe;
896 struct eth_fast_path_rx_cqe *cqe_fp;
898 #ifdef BNX2X_STOP_ON_ERROR
899 if (unlikely(bp->panic))
905 bd_cons = fp->rx_bd_cons;
906 bd_prod = fp->rx_bd_prod;
907 bd_prod_fw = bd_prod;
908 sw_comp_cons = fp->rx_comp_cons;
909 sw_comp_prod = fp->rx_comp_prod;
911 comp_ring_cons = RCQ_BD(sw_comp_cons);
912 cqe = &fp->rx_comp_ring[comp_ring_cons];
913 cqe_fp = &cqe->fast_path_cqe;
915 DP(NETIF_MSG_RX_STATUS,
916 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
918 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
919 struct sw_rx_bd *rx_buf = NULL;
922 enum eth_rx_cqe_type cqe_fp_type;
926 enum pkt_hash_types rxhash_type;
928 #ifdef BNX2X_STOP_ON_ERROR
929 if (unlikely(bp->panic))
933 bd_prod = RX_BD(bd_prod);
934 bd_cons = RX_BD(bd_cons);
936 /* A rmb() is required to ensure that the CQE is not read
937 * before it is written by the adapter DMA. PCI ordering
938 * rules will make sure the other fields are written before
939 * the marker at the end of struct eth_fast_path_rx_cqe
940 * but without rmb() a weakly ordered processor can process
941 * stale data. Without the barrier TPA state-machine might
942 * enter inconsistent state and kernel stack might be
943 * provided with incorrect packet description - these lead
944 * to various kernel crashes.
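*/
rmb();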
948 cqe_fp_flags = cqe_fp->type_error_flags;
949 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
951 DP(NETIF_MSG_RX_STATUS,
952 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
953 CQE_TYPE(cqe_fp_flags),
954 cqe_fp_flags, cqe_fp->status_flags,
955 le32_to_cpu(cqe_fp->rss_hash_result),
956 le16_to_cpu(cqe_fp->vlan_tag),
957 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
959 /* is this a slowpath msg? */
960 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
961 bnx2x_sp_event(fp, cqe);
965 rx_buf = &fp->rx_buf_ring[bd_cons];
968 if (!CQE_TYPE_FAST(cqe_fp_type)) {
969 struct bnx2x_agg_info *tpa_info;
970 u16 frag_size, pages;
971 #ifdef BNX2X_STOP_ON_ERROR
973 if (fp->mode == TPA_MODE_DISABLED &&
974 (CQE_TYPE_START(cqe_fp_type) ||
975 CQE_TYPE_STOP(cqe_fp_type)))
976 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
977 CQE_TYPE(cqe_fp_type));
980 if (CQE_TYPE_START(cqe_fp_type)) {
981 u16 queue = cqe_fp->queue_index;
982 DP(NETIF_MSG_RX_STATUS,
983 "calling tpa_start on queue %d\n",
986 bnx2x_tpa_start(fp, queue,
992 queue = cqe->end_agg_cqe.queue_index;
993 tpa_info = &fp->tpa_info[queue];
994 DP(NETIF_MSG_RX_STATUS,
995 "calling tpa_stop on queue %d\n",
998 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
1001 if (fp->mode == TPA_MODE_GRO)
1002 pages = (frag_size + tpa_info->full_page - 1) /
1003 tpa_info->full_page;
1005 pages = SGE_PAGE_ALIGN(frag_size) >>
1008 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
1009 &cqe->end_agg_cqe, comp_ring_cons);
1010 #ifdef BNX2X_STOP_ON_ERROR
1015 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
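/* TPA flow in brief: a START CQE parks the current rx buffer in
 * tpa_info (bnx2x_tpa_start), subsequent placements go into SGE pages,
 * and the END CQE handled above builds the aggregated skb from those
 * pages (bnx2x_tpa_stop) before the SGE producer is advanced.
 */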
1019 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1020 pad = cqe_fp->placement_offset;
1021 dma_sync_single_for_cpu(&bp->pdev->dev,
1022 dma_unmap_addr(rx_buf, mapping),
1023 pad + RX_COPY_THRESH,
1026 prefetch(data + pad); /* speedup eth_type_trans() */
1027 /* is this an error packet? */
1028 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1029 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1030 "ERROR flags %x rx packet %u\n",
1031 cqe_fp_flags, sw_comp_cons);
1032 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1036 /* Since we don't have a jumbo ring,
1037 * copy small packets if mtu > 1500
1039 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1040 (len <= RX_COPY_THRESH)) {
1041 skb = napi_alloc_skb(&fp->napi, len);
1043 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1044 "ERROR packet dropped because of alloc failure\n");
1045 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1048 memcpy(skb->data, data + pad, len);
1049 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1051 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1052 GFP_ATOMIC) == 0)) {
1053 dma_unmap_single(&bp->pdev->dev,
1054 dma_unmap_addr(rx_buf, mapping),
1057 skb = build_skb(data, fp->rx_frag_size);
1058 if (unlikely(!skb)) {
1059 bnx2x_frag_free(fp, data);
1060 bnx2x_fp_qstats(bp, fp)->
1061 rx_skb_alloc_failed++;
1064 skb_reserve(skb, pad);
1066 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1067 "ERROR packet dropped because of alloc failure\n");
1068 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1070 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1076 skb->protocol = eth_type_trans(skb, bp->dev);
1078 /* Set Toeplitz hash for a non-LRO skb */
1079 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1080 skb_set_hash(skb, rxhash, rxhash_type);
1082 skb_checksum_none_assert(skb);
1084 if (bp->dev->features & NETIF_F_RXCSUM)
1085 bnx2x_csum_validate(skb, cqe, fp,
1086 bnx2x_fp_qstats(bp, fp));
1088 skb_record_rx_queue(skb, fp->rx_queue);
1090 /* Check if this packet was timestamped */
1091 if (unlikely(cqe->fast_path_cqe.type_error_flags &
1092 (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1093 bnx2x_set_rx_ts(bp, skb);
1095 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1097 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1098 le16_to_cpu(cqe_fp->vlan_tag));
1100 skb_mark_napi_id(skb, &fp->napi);
1102 if (bnx2x_fp_ll_polling(fp))
1103 netif_receive_skb(skb);
1105 napi_gro_receive(&fp->napi, skb);
1107 rx_buf->data = NULL;
1109 bd_cons = NEXT_RX_IDX(bd_cons);
1110 bd_prod = NEXT_RX_IDX(bd_prod);
1111 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1114 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1115 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1117 /* mark CQE as free */
1118 BNX2X_SEED_CQE(cqe_fp);
1120 if (rx_pkt == budget)
1123 comp_ring_cons = RCQ_BD(sw_comp_cons);
1124 cqe = &fp->rx_comp_ring[comp_ring_cons];
1125 cqe_fp = &cqe->fast_path_cqe;
1128 fp->rx_bd_cons = bd_cons;
1129 fp->rx_bd_prod = bd_prod_fw;
1130 fp->rx_comp_cons = sw_comp_cons;
1131 fp->rx_comp_prod = sw_comp_prod;
1133 /* Update producers */
1134 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1137 fp->rx_pkt += rx_pkt;
1143 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1145 struct bnx2x_fastpath *fp = fp_cookie;
1146 struct bnx2x *bp = fp->bp;
1150 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1151 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1153 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1155 #ifdef BNX2X_STOP_ON_ERROR
1156 if (unlikely(bp->panic))
1160 /* Handle Rx and Tx according to MSI-X vector */
1161 for_each_cos_in_tx_queue(fp, cos)
1162 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1164 prefetch(&fp->sb_running_index[SM_RX_ID]);
1165 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1170 /* HW Lock for shared dual port PHYs */
1171 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1173 mutex_lock(&bp->port.phy_mutex);
1175 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1178 void bnx2x_release_phy_lock(struct bnx2x *bp)
1180 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1182 mutex_unlock(&bp->port.phy_mutex);
1185 /* calculates MF speed according to current linespeed and MF configuration */
1186 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1188 u16 line_speed = bp->link_vars.line_speed;
1190 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1191 bp->mf_config[BP_VN(bp)]);
1193 /* Calculate the current MAX line speed limit for the MF devices */
1196 if (IS_MF_PERCENT_BW(bp))
1197 line_speed = (line_speed * maxCfg) / 100;
1198 else { /* SD mode */
1199 u16 vn_max_rate = maxCfg * 100;
1201 if (vn_max_rate < line_speed)
1202 line_speed = vn_max_rate;
1210 * bnx2x_fill_report_data - fill link report data to report
1212 * @bp: driver handle
1213 * @data: link state to update
1215 * It uses non-atomic bit operations because it is called under the mutex.
1217 static void bnx2x_fill_report_data(struct bnx2x *bp,
1218 struct bnx2x_link_report_data *data)
1220 memset(data, 0, sizeof(*data));
1223 /* Fill the report data: effective line speed */
1224 data->line_speed = bnx2x_get_mf_speed(bp);
1227 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1228 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1229 &data->link_report_flags);
1231 if (!BNX2X_NUM_ETH_QUEUES(bp))
1232 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1233 &data->link_report_flags);
1236 if (bp->link_vars.duplex == DUPLEX_FULL)
1237 __set_bit(BNX2X_LINK_REPORT_FD,
1238 &data->link_report_flags);
1240 /* Rx Flow Control is ON */
1241 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1242 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1243 &data->link_report_flags);
1245 /* Tx Flow Control is ON */
1246 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1247 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1248 &data->link_report_flags);
1250 *data = bp->vf_link_vars;
1255 * bnx2x_link_report - report link status to OS.
1257 * @bp: driver handle
1259 * Calls the __bnx2x_link_report() under the same locking scheme
1260 * as the link/PHY state managing code to ensure consistent link reporting.
1264 void bnx2x_link_report(struct bnx2x *bp)
1266 bnx2x_acquire_phy_lock(bp);
1267 __bnx2x_link_report(bp);
1268 bnx2x_release_phy_lock(bp);
1272 * __bnx2x_link_report - report link status to OS.
1274 * @bp: driver handle
1276 * Non-atomic implementation.
1277 * Should be called under the phy_lock.
1279 void __bnx2x_link_report(struct bnx2x *bp)
1281 struct bnx2x_link_report_data cur_data;
1283 if (bp->force_link_down) {
1284 bp->link_vars.link_up = 0;
1289 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1290 bnx2x_read_mf_cfg(bp);
1292 /* Read the current link report info */
1293 bnx2x_fill_report_data(bp, &cur_data);
1295 /* Don't report link down or exactly the same link status twice */
1296 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1297 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1298 &bp->last_reported_link.link_report_flags) &&
1299 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1300 &cur_data.link_report_flags)))
1305 /* We are going to report new link parameters now -
1306 * remember the current data for next time.
1308 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1310 /* propagate status to VFs */
1312 bnx2x_iov_link_update(bp);
1314 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1315 &cur_data.link_report_flags)) {
1316 netif_carrier_off(bp->dev);
1317 netdev_err(bp->dev, "NIC Link is Down\n");
1323 netif_carrier_on(bp->dev);
1325 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1326 &cur_data.link_report_flags))
1331 /* Handle the FC at the end so that only these flags could possibly
1332 * be set. This way we can easily check if no FC is enabled.
1335 if (cur_data.link_report_flags) {
1336 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1337 &cur_data.link_report_flags)) {
1338 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1339 &cur_data.link_report_flags))
1340 flow = "ON - receive & transmit";
1342 flow = "ON - receive";
1344 flow = "ON - transmit";
1349 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1350 cur_data.line_speed, duplex, flow);
1354 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1358 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1359 struct eth_rx_sge *sge;
1361 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1363 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1364 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1367 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1368 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1372 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1373 struct bnx2x_fastpath *fp, int last)
1377 for (i = 0; i < last; i++) {
1378 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1379 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1380 u8 *data = first_buf->data;
1383 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1386 if (tpa_info->tpa_state == BNX2X_TPA_START)
1387 dma_unmap_single(&bp->pdev->dev,
1388 dma_unmap_addr(first_buf, mapping),
1389 fp->rx_buf_size, DMA_FROM_DEVICE);
1390 bnx2x_frag_free(fp, data);
1391 first_buf->data = NULL;
1395 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1399 for_each_rx_queue_cnic(bp, j) {
1400 struct bnx2x_fastpath *fp = &bp->fp[j];
1404 /* Activate BD ring */
1406 * this will generate an interrupt (to the TSTORM)
1407 * must only be done after chip is initialized
1409 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1414 void bnx2x_init_rx_rings(struct bnx2x *bp)
1416 int func = BP_FUNC(bp);
1420 /* Allocate TPA resources */
1421 for_each_eth_queue(bp, j) {
1422 struct bnx2x_fastpath *fp = &bp->fp[j];
1425 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1427 if (fp->mode != TPA_MODE_DISABLED) {
1428 /* Fill the per-aggregation pool */
1429 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1430 struct bnx2x_agg_info *tpa_info =
1432 struct sw_rx_bd *first_buf =
1433 &tpa_info->first_buf;
1436 bnx2x_frag_alloc(fp, GFP_KERNEL);
1437 if (!first_buf->data) {
1438 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1440 bnx2x_free_tpa_pool(bp, fp, i);
1441 fp->mode = TPA_MODE_DISABLED;
1444 dma_unmap_addr_set(first_buf, mapping, 0);
1445 tpa_info->tpa_state = BNX2X_TPA_STOP;
1448 /* "next page" elements initialization */
1449 bnx2x_set_next_page_sgl(fp);
1451 /* set SGEs bit mask */
1452 bnx2x_init_sge_ring_bit_mask(fp);
1454 /* Allocate SGEs and initialize the ring elements */
1455 for (i = 0, ring_prod = 0;
1456 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1458 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1460 BNX2X_ERR("was only able to allocate %d rx sges\n",
1462 BNX2X_ERR("disabling TPA for queue[%d]\n",
1464 /* Cleanup already allocated elements */
1465 bnx2x_free_rx_sge_range(bp, fp,
1467 bnx2x_free_tpa_pool(bp, fp,
1469 fp->mode = TPA_MODE_DISABLED;
1473 ring_prod = NEXT_SGE_IDX(ring_prod);
1476 fp->rx_sge_prod = ring_prod;
1480 for_each_eth_queue(bp, j) {
1481 struct bnx2x_fastpath *fp = &bp->fp[j];
1485 /* Activate BD ring */
1487 * this will generate an interrupt (to the TSTORM)
1488 * must only be done after chip is initialized
1490 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1496 if (CHIP_IS_E1(bp)) {
1497 REG_WR(bp, BAR_USTRORM_INTMEM +
1498 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1499 U64_LO(fp->rx_comp_mapping));
1500 REG_WR(bp, BAR_USTRORM_INTMEM +
1501 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1502 U64_HI(fp->rx_comp_mapping));
1507 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1510 struct bnx2x *bp = fp->bp;
1512 for_each_cos_in_tx_queue(fp, cos) {
1513 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1514 unsigned pkts_compl = 0, bytes_compl = 0;
1516 u16 sw_prod = txdata->tx_pkt_prod;
1517 u16 sw_cons = txdata->tx_pkt_cons;
1519 while (sw_cons != sw_prod) {
1520 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1521 &pkts_compl, &bytes_compl);
1525 netdev_tx_reset_queue(
1526 netdev_get_tx_queue(bp->dev,
1527 txdata->txq_index));
1531 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1535 for_each_tx_queue_cnic(bp, i) {
1536 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1540 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1544 for_each_eth_queue(bp, i) {
1545 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1549 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1551 struct bnx2x *bp = fp->bp;
1554 /* ring wasn't allocated */
1555 if (fp->rx_buf_ring == NULL)
1558 for (i = 0; i < NUM_RX_BD; i++) {
1559 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1560 u8 *data = rx_buf->data;
1564 dma_unmap_single(&bp->pdev->dev,
1565 dma_unmap_addr(rx_buf, mapping),
1566 fp->rx_buf_size, DMA_FROM_DEVICE);
1568 rx_buf->data = NULL;
1569 bnx2x_frag_free(fp, data);
1573 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1577 for_each_rx_queue_cnic(bp, j) {
1578 bnx2x_free_rx_bds(&bp->fp[j]);
1582 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1586 for_each_eth_queue(bp, j) {
1587 struct bnx2x_fastpath *fp = &bp->fp[j];
1589 bnx2x_free_rx_bds(fp);
1591 if (fp->mode != TPA_MODE_DISABLED)
1592 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1596 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1598 bnx2x_free_tx_skbs_cnic(bp);
1599 bnx2x_free_rx_skbs_cnic(bp);
1602 void bnx2x_free_skbs(struct bnx2x *bp)
1604 bnx2x_free_tx_skbs(bp);
1605 bnx2x_free_rx_skbs(bp);
1608 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1610 /* load old values */
1611 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1613 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1614 /* leave all but MAX value */
1615 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1617 /* set new MAX value */
1618 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1619 & FUNC_MF_CFG_MAX_BW_MASK;
1621 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1626 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1628 * @bp: driver handle
1629 * @nvecs: number of vectors to be released
1631 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1635 if (nvecs == offset)
1638 /* VFs don't have a default SB */
1640 free_irq(bp->msix_table[offset].vector, bp->dev);
1641 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1642 bp->msix_table[offset].vector);
1646 if (CNIC_SUPPORT(bp)) {
1647 if (nvecs == offset)
1652 for_each_eth_queue(bp, i) {
1653 if (nvecs == offset)
1655 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1656 i, bp->msix_table[offset].vector);
1658 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1662 void bnx2x_free_irq(struct bnx2x *bp)
1664 if (bp->flags & USING_MSIX_FLAG &&
1665 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1666 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1668 /* vfs don't have a default status block */
1672 bnx2x_free_msix_irqs(bp, nvecs);
1674 free_irq(bp->dev->irq, bp->dev);
1678 int bnx2x_enable_msix(struct bnx2x *bp)
1680 int msix_vec = 0, i, rc;
1682 /* VFs don't have a default status block */
1684 bp->msix_table[msix_vec].entry = msix_vec;
1685 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1686 bp->msix_table[0].entry);
1690 /* Cnic requires an msix vector for itself */
1691 if (CNIC_SUPPORT(bp)) {
1692 bp->msix_table[msix_vec].entry = msix_vec;
1693 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1694 msix_vec, bp->msix_table[msix_vec].entry);
1698 /* We need separate vectors for ETH queues only (not FCoE) */
1699 for_each_eth_queue(bp, i) {
1700 bp->msix_table[msix_vec].entry = msix_vec;
1701 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1702 msix_vec, msix_vec, i);
1706 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1709 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1710 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1712 * reconfigure number of tx/rx queues according to available
1715 if (rc == -ENOSPC) {
1716 /* Get by with single vector */
1717 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1719 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1724 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1725 bp->flags |= USING_SINGLE_MSIX_FLAG;
1727 BNX2X_DEV_INFO("set number of queues to 1\n");
1728 bp->num_ethernet_queues = 1;
1729 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1730 } else if (rc < 0) {
1731 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1733 } else if (rc < msix_vec) {
1734 /* how many fewer vectors will we have? */
1735 int diff = msix_vec - rc;
1737 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1740 * decrease number of queues by number of unallocated entries
1742 bp->num_ethernet_queues -= diff;
1743 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1745 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1749 bp->flags |= USING_MSIX_FLAG;
1754 /* fall back to INTx if not enough memory */
1756 bp->flags |= DISABLE_MSI_FLAG;
1761 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1763 int i, rc, offset = 0;
1765 /* no default status block for vf */
1767 rc = request_irq(bp->msix_table[offset++].vector,
1768 bnx2x_msix_sp_int, 0,
1769 bp->dev->name, bp->dev);
1771 BNX2X_ERR("request sp irq failed\n");
1776 if (CNIC_SUPPORT(bp))
1779 for_each_eth_queue(bp, i) {
1780 struct bnx2x_fastpath *fp = &bp->fp[i];
1781 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1784 rc = request_irq(bp->msix_table[offset].vector,
1785 bnx2x_msix_fp_int, 0, fp->name, fp);
1787 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1788 bp->msix_table[offset].vector, rc);
1789 bnx2x_free_msix_irqs(bp, offset);
1796 i = BNX2X_NUM_ETH_QUEUES(bp);
1798 offset = 1 + CNIC_SUPPORT(bp);
1799 netdev_info(bp->dev,
1800 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1801 bp->msix_table[0].vector,
1802 0, bp->msix_table[offset].vector,
1803 i - 1, bp->msix_table[offset + i - 1].vector);
1805 offset = CNIC_SUPPORT(bp);
1806 netdev_info(bp->dev,
1807 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1808 0, bp->msix_table[offset].vector,
1809 i - 1, bp->msix_table[offset + i - 1].vector);
1814 int bnx2x_enable_msi(struct bnx2x *bp)
1818 rc = pci_enable_msi(bp->pdev);
1820 BNX2X_DEV_INFO("MSI is not attainable\n");
1823 bp->flags |= USING_MSI_FLAG;
1828 static int bnx2x_req_irq(struct bnx2x *bp)
1830 unsigned long flags;
1833 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1836 flags = IRQF_SHARED;
1838 if (bp->flags & USING_MSIX_FLAG)
1839 irq = bp->msix_table[0].vector;
1841 irq = bp->pdev->irq;
1843 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1846 static int bnx2x_setup_irqs(struct bnx2x *bp)
1849 if (bp->flags & USING_MSIX_FLAG &&
1850 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1851 rc = bnx2x_req_msix_irqs(bp);
1855 rc = bnx2x_req_irq(bp);
1857 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1860 if (bp->flags & USING_MSI_FLAG) {
1861 bp->dev->irq = bp->pdev->irq;
1862 netdev_info(bp->dev, "using MSI IRQ %d\n",
1865 if (bp->flags & USING_MSIX_FLAG) {
1866 bp->dev->irq = bp->msix_table[0].vector;
1867 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1875 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1879 for_each_rx_queue_cnic(bp, i) {
1880 bnx2x_fp_busy_poll_init(&bp->fp[i]);
1881 napi_enable(&bnx2x_fp(bp, i, napi));
1885 static void bnx2x_napi_enable(struct bnx2x *bp)
1889 for_each_eth_queue(bp, i) {
1890 bnx2x_fp_busy_poll_init(&bp->fp[i]);
1891 napi_enable(&bnx2x_fp(bp, i, napi));
1895 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1899 for_each_rx_queue_cnic(bp, i) {
1900 napi_disable(&bnx2x_fp(bp, i, napi));
1901 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1902 usleep_range(1000, 2000);
1906 static void bnx2x_napi_disable(struct bnx2x *bp)
1910 for_each_eth_queue(bp, i) {
1911 napi_disable(&bnx2x_fp(bp, i, napi));
1912 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1913 usleep_range(1000, 2000);
1917 void bnx2x_netif_start(struct bnx2x *bp)
1919 if (netif_running(bp->dev)) {
1920 bnx2x_napi_enable(bp);
1921 if (CNIC_LOADED(bp))
1922 bnx2x_napi_enable_cnic(bp);
1923 bnx2x_int_enable(bp);
1924 if (bp->state == BNX2X_STATE_OPEN)
1925 netif_tx_wake_all_queues(bp->dev);
1929 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1931 bnx2x_int_disable_sync(bp, disable_hw);
1932 bnx2x_napi_disable(bp);
1933 if (CNIC_LOADED(bp))
1934 bnx2x_napi_disable_cnic(bp);
1937 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1938 void *accel_priv, select_queue_fallback_t fallback)
1940 struct bnx2x *bp = netdev_priv(dev);
1942 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1943 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1944 u16 ether_type = ntohs(hdr->h_proto);
1946 /* Skip VLAN tag if present */
1947 if (ether_type == ETH_P_8021Q) {
1948 struct vlan_ethhdr *vhdr =
1949 (struct vlan_ethhdr *)skb->data;
1951 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1954 /* If ethertype is FCoE or FIP - use FCoE ring */
1955 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1956 return bnx2x_fcoe_tx(bp, txq_index);
1959 /* select a non-FCoE queue */
1960 return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp));
1963 void bnx2x_set_num_queues(struct bnx2x *bp)
1966 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1968 /* override in STORAGE SD modes */
1969 if (IS_MF_STORAGE_ONLY(bp))
1970 bp->num_ethernet_queues = 1;
1972 /* Add special queues */
1973 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1974 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1976 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1980 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1982 * @bp: Driver handle
1984 * We currently support at most 16 Tx queues for each CoS, thus we will
1985 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1988 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1989 * index after all ETH L2 indices.
1991 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1992 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1993 * 16..31,...) with indices that are not coupled with any real Tx queue.
1995 * The proper configuration of skb->queue_mapping is handled by
1996 * bnx2x_select_queue() and __skb_tx_hash().
1998 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1999 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
2001 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
2005 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
2006 rx = BNX2X_NUM_ETH_QUEUES(bp);
2008 /* account for fcoe queue */
2009 if (include_cnic && !NO_FCOE(bp)) {
2014 rc = netif_set_real_num_tx_queues(bp->dev, tx);
2016 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
2019 rc = netif_set_real_num_rx_queues(bp->dev, rx);
2021 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2025 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2031 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2035 for_each_queue(bp, i) {
2036 struct bnx2x_fastpath *fp = &bp->fp[i];
2039 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2042 * Although there are no IP frames expected to arrive on
2043 * this ring, we still want to add an
2044 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer overrun.
2047 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2050 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2051 IP_HEADER_ALIGNMENT_PADDING +
2054 BNX2X_FW_RX_ALIGN_END;
2055 fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
2056 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
2057 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2058 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2060 fp->rx_frag_size = 0;
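/* On 4 KiB pages, a standard 1500-byte MTU makes the aligned buffer plus
 * NET_SKB_PAD fit in a single page, so the cheaper page-frag allocator is
 * used; jumbo MTUs overflow PAGE_SIZE, rx_frag_size stays 0 and
 * bnx2x_frag_alloc() falls back to kmalloc() (see above).
 */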
2064 static int bnx2x_init_rss(struct bnx2x *bp)
2067 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2069 /* Prepare the initial contents for the indirection table if RSS is enabled */
2072 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2073 bp->rss_conf_obj.ind_table[i] =
2075 ethtool_rxfh_indir_default(i, num_eth_queues);
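/* ethtool_rxfh_indir_default(i, n) is simply i % n, so by default RX
 * flows are striped round-robin across the ETH queues.
 */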
2078 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2079 * per-port, so if explicit configuration is needed, do it only for a PMF.
2082 * For 57712 and newer on the other hand it's a per-function
2085 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2088 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2089 bool config_hash, bool enable)
2091 struct bnx2x_config_rss_params params = {NULL};
2093 /* Although RSS is meaningless when there is a single HW queue, we
2094 * still need it enabled in order to have HW Rx hash generated.
2096 * if (!is_eth_multi(bp))
2097 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2100 params.rss_obj = rss_obj;
2102 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
2105 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
2107 /* RSS configuration */
2108 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
2109 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
2110 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
2111 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
2112 if (rss_obj->udp_rss_v4)
2113 __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
2114 if (rss_obj->udp_rss_v6)
2115 __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
2117 if (!CHIP_IS_E1x(bp)) {
2118 /* valid only for TUNN_MODE_VXLAN tunnel mode */
2119 __set_bit(BNX2X_RSS_IPV4_VXLAN, ¶ms.rss_flags);
2120 __set_bit(BNX2X_RSS_IPV6_VXLAN, ¶ms.rss_flags);
2122 /* valid only for TUNN_MODE_GRE tunnel mode */
2123 __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, ¶ms.rss_flags);
2126 __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags);
2130 params.rss_result_mask = MULTI_MASK;
2132 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2136 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2137 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
2141 return bnx2x_config_rss(bp, ¶ms);
2143 return bnx2x_vfpf_config_rss(bp, ¶ms);
2146 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2148 struct bnx2x_func_state_params func_params = {NULL};
2150 /* Prepare parameters for function state transitions */
2151 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2153 func_params.f_obj = &bp->func_obj;
2154 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2156 func_params.params.hw_init.load_phase = load_code;
2158 return bnx2x_func_state_change(bp, &func_params);
2162 * Cleans the objects that have internal lists without sending
2163 * ramrods. Should be run when interrupts are disabled.
2165 void bnx2x_squeeze_objects(struct bnx2x *bp)
2168 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2169 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2170 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2172 /***************** Cleanup MACs' object first *************************/
2174 /* Wait for completion of the requested commands */
2175 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2176 /* Perform a dry cleanup */
2177 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2179 /* Clean ETH primary MAC */
2180 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2181 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2184 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2186 /* Cleanup UC list */
2188 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2189 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2192 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2194 /***************** Now clean mcast object *****************************/
2195 rparam.mcast_obj = &bp->mcast_obj;
2196 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2198 /* Add a DEL command... - Since we're doing a driver cleanup only,
2199 * we take a lock surrounding both the initial send and the CONTs,
2200 * as we don't want a true completion to disrupt us in the middle.
2202 netif_addr_lock_bh(bp->dev);
2203 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2205 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2208 /* ...and wait until all pending commands are cleared */
2209 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2212 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2214 netif_addr_unlock_bh(bp->dev);
2218 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2220 netif_addr_unlock_bh(bp->dev);
2223 #ifndef BNX2X_STOP_ON_ERROR
2224 #define LOAD_ERROR_EXIT(bp, label) \
2226 (bp)->state = BNX2X_STATE_ERROR; \
2230 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2232 bp->cnic_loaded = false; \
2235 #else /*BNX2X_STOP_ON_ERROR*/
2236 #define LOAD_ERROR_EXIT(bp, label) \
2238 (bp)->state = BNX2X_STATE_ERROR; \
2242 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2244 bp->cnic_loaded = false; \
2248 #endif /*BNX2X_STOP_ON_ERROR*/
2250 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2252 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2253 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2257 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2259 int num_groups, vf_headroom = 0;
2260 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2262 /* number of queues for statistics is number of eth queues + FCoE */
2263 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2265 /* Total number of FW statistics requests =
2266 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2267 * and fcoe l2 queue) stats + num of queues (which includes another 1
2268 * for fcoe l2 queue if applicable)
2270 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2272 /* vf stats appear in the request list, but their data is allocated by
2273 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2274 * it is used to determine where to place the vf stats queries in the
2278 vf_headroom = bnx2x_vf_headroom(bp);
2280 /* Request is built from stats_query_header and an array of
2281 * stats_query_cmd_group each of which contains
2282 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2283 * configured in the stats_query_header.
2286 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2287 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2290 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2291 bp->fw_stats_num, vf_headroom, num_groups);
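/* The two-term expression above is just a ceiling division, i.e.
 * num_groups = DIV_ROUND_UP(bp->fw_stats_num + vf_headroom,
 *                           STATS_QUERY_CMD_COUNT);
 */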
2292 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2293 num_groups * sizeof(struct stats_query_cmd_group);
	/* Data for statistics requests + stats_counter
	 * stats_counter holds per-STORM counters that are incremented
	 * when STORM has finished with the current request.
	 * memory for FCoE offloaded statistics are counted anyway,
	 * even if they will not be sent.
	 * VF stats are not accounted for here as the data of VF stats is stored
	 * in memory allocated by the VF, not here.
	 */
	bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
		sizeof(struct per_pf_stats) +
		sizeof(struct fcoe_statistics_params) +
		sizeof(struct per_queue_stats) * num_queue_stats +
		sizeof(struct stats_counter);

	bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
				       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
	if (!bp->fw_stats)
		goto alloc_mem_err;

	/* Set shortcuts */
	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
	bp->fw_stats_req_mapping = bp->fw_stats_mapping;
	bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
		((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
	bp->fw_stats_data_mapping = bp->fw_stats_mapping +
		bp->fw_stats_req_sz;

	DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
	   U64_HI(bp->fw_stats_req_mapping),
	   U64_LO(bp->fw_stats_req_mapping));
	DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
	   U64_HI(bp->fw_stats_data_mapping),
	   U64_LO(bp->fw_stats_data_mapping));
	return 0;

alloc_mem_err:
	bnx2x_free_fw_stats_mem(bp);
	BNX2X_ERR("Can't allocate FW stats memory\n");
	return -ENOMEM;
}
/* send load request to mcp and analyze response */
static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
{
	u32 param;

	/* init fw_seq */
	bp->fw_seq =
		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		 DRV_MSG_SEQ_NUMBER_MASK);
	BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);

	/* Get current FW pulse sequence */
	bp->fw_drv_pulse_wr_seq =
		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
		 DRV_PULSE_SEQ_MASK);
	BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);

	param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;

	if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
		param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;

	/* load request */
	(*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);

	/* if mcp fails to respond we must abort */
	if (!(*load_code)) {
		BNX2X_ERR("MCP response failure, aborting\n");
		return -EBUSY;
	}

	/* If mcp refused (e.g. other port is in diagnostic mode) we
	 * must abort
	 */
	if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
		BNX2X_ERR("MCP refused load request, aborting\n");
		return -EBUSY;
	}
	return 0;
}
/* check whether another PF has already loaded FW to chip. In
 * virtualized environments a pf from another VM may have already
 * initialized the device including loading FW
 */
int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
{
	/* is another pf loaded on this engine? */
	if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
	    load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
		/* build my FW version dword */
		u32 my_fw;

		/* read loaded FW from chip */
		u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);

		my_fw = ~loaded_fw;

		DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
		   loaded_fw, my_fw);

		/* abort nic load if version mismatch */
		if (my_fw != loaded_fw) {
			if (print_err)
				BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
					  loaded_fw, my_fw);
			else
				BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
					       loaded_fw, my_fw);
			return -EBUSY;
		}
	}
	return 0;
}
/* returns the "mcp load_code" according to global load_count array */
static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
{
	int path = BP_PATH(bp);

	DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
	   bnx2x_load_count[path][2]);
	bnx2x_load_count[path][0]++;
	bnx2x_load_count[path][1 + port]++;
	DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
	   bnx2x_load_count[path][2]);
	if (bnx2x_load_count[path][0] == 1)
		return FW_MSG_CODE_DRV_LOAD_COMMON;
	else if (bnx2x_load_count[path][1 + port] == 1)
		return FW_MSG_CODE_DRV_LOAD_PORT;
	else
		return FW_MSG_CODE_DRV_LOAD_FUNCTION;
}
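
/* Example: the three counters per path are {functions, port 0, port 1}.
 * The first function to load on a path answers itself LOAD_COMMON (and
 * performs chip-common init), the first on its port LOAD_PORT, and any
 * later function LOAD_FUNCTION - mimicking the MCP's arbitration.
 */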
/* mark PMF if applicable */
static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
{
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
		bp->port.pmf = 1;
		/* We need the barrier to ensure the ordering between the
		 * writing to bp->port.pmf here and reading it from the
		 * bnx2x_periodic_task().
		 */
		smp_mb();
	} else {
		bp->port.pmf = 0;
	}

	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
}
static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
{
	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	     (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
	    (bp->common.shmem2_base)) {
		if (SHMEM2_HAS(bp, dcc_support))
			SHMEM2_WR(bp, dcc_support,
				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
		if (SHMEM2_HAS(bp, afex_driver_support))
			SHMEM2_WR(bp, afex_driver_support,
				  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
	}

	/* Set AFEX default VLAN tag to an invalid value */
	bp->afex_def_vlan_tag = -1;
}
/**
 * bnx2x_bz_fp - zero content of the fastpath structure.
 *
 * @bp:		driver handle
 * @index:	fastpath index to be zeroed
 *
 * Makes sure the contents of the bp->fp[index].napi is kept
 * intact.
 */
static void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int cos;
	struct napi_struct orig_napi = fp->napi;
	struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;

	/* bzero bnx2x_fastpath contents */
	if (fp->tpa_info)
		memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
		       sizeof(struct bnx2x_agg_info));
	memset(fp, 0, sizeof(*fp));

	/* Restore the NAPI object as it has been already initialized */
	fp->napi = orig_napi;
	fp->tpa_info = orig_tpa_info;
	fp->bp = bp;
	fp->index = index;
	if (IS_ETH_FP(fp))
		fp->max_cos = bp->max_cos;
	else
		/* Special queues support only one CoS */
		fp->max_cos = 1;

	/* Init txdata pointers */
	if (IS_FCOE_FP(fp))
		fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
	if (IS_ETH_FP(fp))
		for_each_cos_in_tx_queue(fp, cos)
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
				BNX2X_NUM_ETH_QUEUES(bp) + index];

	/* set the tpa flag for each queue. The tpa flag determines the queue
	 * minimal size so it must be set prior to queue memory allocation
	 */
	if (bp->dev->features & NETIF_F_LRO)
		fp->mode = TPA_MODE_LRO;
	else if (bp->dev->features & NETIF_F_GRO &&
		 bnx2x_mtu_allows_gro(bp->dev->mtu))
		fp->mode = TPA_MODE_GRO;
	else
		fp->mode = TPA_MODE_DISABLED;

	/* We don't want TPA if it's disabled in bp
	 * or if this is an FCoE L2 ring.
	 */
	if (bp->disable_tpa || IS_FCOE_FP(fp))
		fp->mode = TPA_MODE_DISABLED;
}
void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
{
	u32 cur;

	if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
		return;

	cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
	DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
	   cur, state);

	SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
}
int bnx2x_load_cnic(struct bnx2x *bp)
{
	int i, rc, port = BP_PORT(bp);

	DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");

	mutex_init(&bp->cnic_mutex);

	if (IS_PF(bp)) {
		rc = bnx2x_alloc_mem_cnic(bp);
		if (rc) {
			BNX2X_ERR("Unable to allocate bp memory for cnic\n");
			LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
		}
	}

	rc = bnx2x_alloc_fp_mem_cnic(bp);
	if (rc) {
		BNX2X_ERR("Unable to allocate memory for cnic fps\n");
		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
	}

	/* Update the number of queues with the cnic queues */
	rc = bnx2x_set_real_num_queues(bp, 1);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues including cnic\n");
		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
	}

	/* Add all CNIC NAPI objects */
	bnx2x_add_all_napi_cnic(bp);
	DP(NETIF_MSG_IFUP, "cnic napi added\n");
	bnx2x_napi_enable_cnic(bp);

	rc = bnx2x_init_hw_func_cnic(bp);
	if (rc)
		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);

	bnx2x_nic_init_cnic(bp);

	if (IS_PF(bp)) {
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);

		/* setup cnic queues */
		for_each_cnic_queue(bp, i) {
			rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
			if (rc) {
				BNX2X_ERR("Queue setup failed\n");
				LOAD_ERROR_EXIT(bp, load_error_cnic2);
			}
		}
	}

	/* Initialize Rx filter. */
	bnx2x_set_rx_mode_inner(bp);

	/* re-read iscsi info */
	bnx2x_get_iscsi_info(bp);
	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_setup_cnic_info(bp);
	bp->cnic_loaded = true;
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);

	DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");

	return 0;

#ifndef BNX2X_STOP_ON_ERROR
load_error_cnic2:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);

load_error_cnic1:
	bnx2x_napi_disable_cnic(bp);
	/* Update the number of queues without the cnic queues */
	if (bnx2x_set_real_num_queues(bp, 0))
		BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
load_error_cnic0:
	BNX2X_ERR("CNIC-related load failed\n");
	bnx2x_free_fp_mem_cnic(bp);
	bnx2x_free_mem_cnic(bp);
	return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	int port = BP_PORT(bp);
	int i, rc = 0, load_code = 0;

	DP(NETIF_MSG_IFUP, "Starting NIC load\n");
	DP(NETIF_MSG_IFUP,
	   "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic)) {
		BNX2X_ERR("Can't load NIC when there is panic\n");
		return -EPERM;
	}
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* zero the structure w/o any lock, before SP handler is initialized */
	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		  &bp->last_reported_link.link_report_flags);

	if (IS_PF(bp))
		/* must be called before memory allocation and HW init */
		bnx2x_ilt_set_info(bp);

	/*
	 * Zero fastpath structures preserving invariants like napi, which are
	 * allocated only once, fp index, max_cos, bp pointer.
	 * Also set fp->mode and txdata_ptr.
	 */
	DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
	for_each_queue(bp, i)
		bnx2x_bz_fp(bp, i);
	memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
				  bp->num_cnic_queues) *
				  sizeof(struct bnx2x_fp_txdata));

	bp->fcoe_init = false;

	/* Set the receive queues buffer size */
	bnx2x_set_rx_buf_size(bp);

	if (IS_PF(bp)) {
		rc = bnx2x_alloc_mem(bp);
		if (rc) {
			BNX2X_ERR("Unable to allocate bp memory\n");
			return rc;
		}
	}

	/* need to be done after alloc mem, since it's self adjusting to amount
	 * of memory available for RSS queues
	 */
	rc = bnx2x_alloc_fp_mem(bp);
	if (rc) {
		BNX2X_ERR("Unable to allocate memory for fps\n");
		LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* Allocate memory for FW statistics */
	rc = bnx2x_alloc_fw_stats_mem(bp);
	if (rc)
		LOAD_ERROR_EXIT(bp, load_error0);

	/* request pf to initialize status blocks */
	if (IS_VF(bp)) {
		rc = bnx2x_vfpf_init(bp);
		if (rc)
			LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* As long as bnx2x_alloc_mem() may possibly update
	 * bp->num_queues, bnx2x_set_real_num_queues() should always
	 * come after it. At this stage cnic queues are not counted.
	 */
	rc = bnx2x_set_real_num_queues(bp, 0);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues\n");
		LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* configure multi cos mappings in kernel.
	 * this configuration may be overridden by a multi class queue
	 * discipline or by a dcbx negotiation result.
	 */
	bnx2x_setup_tc(bp->dev, bp->max_cos);

	/* Add all NAPI objects */
	bnx2x_add_all_napi(bp);
	DP(NETIF_MSG_IFUP, "napi added\n");
	bnx2x_napi_enable(bp);

	if (IS_PF(bp)) {
		/* set pf load just before approaching the MCP */
		bnx2x_set_pf_load(bp);

		/* if mcp exists send load request and analyze response */
		if (!BP_NOMCP(bp)) {
			/* attempt to load pf */
			rc = bnx2x_nic_load_request(bp, &load_code);
			if (rc)
				LOAD_ERROR_EXIT(bp, load_error1);

			/* what did mcp say? */
			rc = bnx2x_compare_fw_ver(bp, load_code, true);
			if (rc) {
				bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
				LOAD_ERROR_EXIT(bp, load_error2);
			}
		} else {
			load_code = bnx2x_nic_load_no_mcp(bp, port);
		}

		/* mark pmf if applicable */
		bnx2x_nic_load_pmf(bp, load_code);

		/* Init Function state controlling object */
		bnx2x__init_func_obj(bp);

		/* Initialize HW */
		rc = bnx2x_init_hw(bp, load_code);
		if (rc) {
			BNX2X_ERR("HW init failed, aborting\n");
			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
			LOAD_ERROR_EXIT(bp, load_error2);
		}
	}

	bnx2x_pre_irq_nic_init(bp);

	/* Connect to IRQs */
	rc = bnx2x_setup_irqs(bp);
	if (rc) {
		BNX2X_ERR("setup irqs failed\n");
		if (IS_PF(bp))
			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error2);
	}

	/* Init per-function objects */
	if (IS_PF(bp)) {
		/* Setup NIC internals and enable interrupts */
		bnx2x_post_irq_nic_init(bp, load_code);

		bnx2x_init_bp_objs(bp);
		bnx2x_iov_nic_init(bp);

		/* Set AFEX default VLAN tag to an invalid value */
		bp->afex_def_vlan_tag = -1;
		bnx2x_nic_load_afex_dcc(bp, load_code);
		bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
		rc = bnx2x_func_start(bp);
		if (rc) {
			BNX2X_ERR("Function start failed!\n");
			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);

			LOAD_ERROR_EXIT(bp, load_error3);
		}

		/* Send LOAD_DONE command to MCP */
		if (!BP_NOMCP(bp)) {
			load_code = bnx2x_fw_command(bp,
						     DRV_MSG_CODE_LOAD_DONE, 0);
			if (!load_code) {
				BNX2X_ERR("MCP response failure, aborting\n");
				rc = -EBUSY;
				LOAD_ERROR_EXIT(bp, load_error3);
			}
		}

		/* initialize FW coalescing state machines in RAM */
		bnx2x_update_coalesce(bp);
	}

	/* setup the leading queue */
	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		LOAD_ERROR_EXIT(bp, load_error3);
	}

	/* set up the rest of the queues */
	for_each_nondefault_eth_queue(bp, i) {
		if (IS_PF(bp))
			rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
		else /* VF */
			rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
		if (rc) {
			BNX2X_ERR("Queue %d setup failed\n", i);
			LOAD_ERROR_EXIT(bp, load_error3);
		}
	}

	/* setup rss */
	rc = bnx2x_init_rss(bp);
	if (rc) {
		BNX2X_ERR("PF RSS init failed\n");
		LOAD_ERROR_EXIT(bp, load_error3);
	}

	/* Now when Clients are configured we are ready to work */
	bp->state = BNX2X_STATE_OPEN;

	/* Configure a ucast MAC */
	if (IS_PF(bp))
		rc = bnx2x_set_eth_mac(bp, true);
	else /* vf */
		rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
					   true);
	if (rc) {
		BNX2X_ERR("Setting Ethernet MAC failed\n");
		LOAD_ERROR_EXIT(bp, load_error3);
	}

	if (IS_PF(bp) && bp->pending_max) {
		bnx2x_update_max_mf_config(bp, bp->pending_max);
		bp->pending_max = 0;
	}

	bp->force_link_down = false;
	if (bp->port.pmf) {
		rc = bnx2x_initial_phy_init(bp, load_mode);
		if (rc)
			LOAD_ERROR_EXIT(bp, load_error3);
	}
	bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;

	/* Start fast path */

	/* Re-configure vlan filters */
	rc = bnx2x_vlan_reconfigure_vid(bp);
	if (rc)
		LOAD_ERROR_EXIT(bp, load_error3);

	/* Initialize Rx filter. */
	bnx2x_set_rx_mode_inner(bp);

	if (bp->flags & PTP_SUPPORTED) {
		bnx2x_init_ptp(bp);
		bnx2x_configure_ptp_filters(bp);
	}
	/* Start Tx */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only re-enabled */
		netif_tx_wake_all_queues(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		smp_mb__after_atomic();
		break;

	case LOAD_DIAG:
	case LOAD_LOOPBACK_EXT:
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (bp->port.pmf)
		bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
	else
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	if (CNIC_ENABLED(bp))
		bnx2x_load_cnic(bp);

	if (IS_PF(bp))
		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);

	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
		/* mark driver is loaded in shmem2 */
		u32 val;
		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
		val &= ~DRV_FLAGS_MTU_MASK;
		val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
			  val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
			  DRV_FLAGS_CAPABILITIES_LOADED_L2);
	}

	/* Wait for all pending SP commands to complete */
	if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
		bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
		return -EBUSY;
	}

	/* Update driver data for On-Chip MFW dump. */
	if (IS_PF(bp))
		bnx2x_update_mfw_dump(bp);

	/* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
	if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
		bnx2x_dcbx_init(bp, false);

	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);

	DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");

	return 0;

#ifndef BNX2X_STOP_ON_ERROR
load_error3:
	if (IS_PF(bp)) {
		bnx2x_int_disable_sync(bp, 1);

		/* Clean queueable objects */
		bnx2x_squeeze_objects(bp);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error2:
	if (IS_PF(bp) && !BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}

	bp->port.pmf = 0;
load_error1:
	bnx2x_napi_disable(bp);
	bnx2x_del_all_napi(bp);

	/* clear pf_load status, as it was already set */
	if (IS_PF(bp))
		bnx2x_clear_pf_load(bp);
load_error0:
	bnx2x_free_fw_stats_mem(bp);
	bnx2x_free_fp_mem(bp);
	bnx2x_free_mem(bp);

	return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}
int bnx2x_drain_tx_queues(struct bnx2x *bp)
{
	u8 rc = 0, cos, i;

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		for_each_cos_in_tx_queue(fp, cos)
			rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
		if (rc)
			return rc;
	}
	return 0;
}
/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
{
	int i;
	bool global = false;

	DP(NETIF_MSG_IFUP, "Starting NIC unload\n");

	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);

	/* mark driver is unloaded in shmem2 */
	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
		u32 val;
		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
			  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
	}

	if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
	    (bp->state == BNX2X_STATE_CLOSED ||
	     bp->state == BNX2X_STATE_ERROR)) {
		/* We can get here if the driver has been unloaded
		 * during parity error recovery and is either waiting for a
		 * leader to complete or for other functions to unload and
		 * then ifdown has been issued. In this case we want to
		 * unload and let other functions complete a recovery
		 * process.
		 */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_leader_lock(bp);
		smp_mb();

		DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
		BNX2X_ERR("Can't unload in closed or error state\n");
		return -EINVAL;
	}

	/* Nothing to do during unload if previous bnx2x_nic_load()
	 * has not completed successfully - all resources are released.
	 *
	 * we can get here only after unsuccessful ndo_* callback, during which
	 * dev->IFF_UP flag is still on.
	 */
	if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
		return 0;

	/* It's important to set the bp->state to the value different from
	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
	 */
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
	smp_mb();

	/* indicate to VFs that the PF is going down */
	bnx2x_iov_channel_down(bp);

	if (CNIC_LOADED(bp))
		bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);

	/* Stop Tx */
	bnx2x_tx_disable(bp);
	netdev_reset_tc(bp->dev);

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	del_timer_sync(&bp->timer);

	if (IS_PF(bp) && !BP_NOMCP(bp)) {
		/* Set ALWAYS_ALIVE bit in shmem */
		bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
		bnx2x_drv_pulse(bp);

		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_save_statistics(bp);
	}

	/* wait till consumers catch up with producers in all queues */
	bnx2x_drain_tx_queues(bp);

	/* if VF indicate to PF this function is going down (PF will delete sp
	 * elements and clear initializations)
	 */
	if (IS_VF(bp))
		bnx2x_vfpf_close_vf(bp);
	else if (unload_mode != UNLOAD_RECOVERY)
		/* if this is a normal/close unload need to clean up chip*/
		bnx2x_chip_cleanup(bp, unload_mode, keep_link);
	else {
		/* Send the UNLOAD_REQUEST to the MCP */
		bnx2x_send_unload_req(bp, unload_mode);

		/* Prevent transactions to host from the functions on the
		 * engine that doesn't reset global blocks in case of global
		 * attention once global blocks are reset and gates are opened
		 * (the engine which leader will perform the recovery
		 * last)
		 */
		if (!CHIP_IS_E1x(bp))
			bnx2x_pf_disable(bp);

		/* Disable HW interrupts, NAPI */
		bnx2x_netif_stop(bp, 1);
		/* Delete all NAPI objects */
		bnx2x_del_all_napi(bp);
		if (CNIC_LOADED(bp))
			bnx2x_del_all_napi_cnic(bp);
		/* Release IRQs */
		bnx2x_free_irq(bp);

		/* Report UNLOAD_DONE to MCP */
		bnx2x_send_unload_done(bp, false);
	}

	/*
	 * At this stage no more interrupts will arrive so we may safely clean
	 * the queueable objects here in case they failed to get cleaned so far.
	 */
	if (IS_PF(bp))
		bnx2x_squeeze_objects(bp);

	/* There should be no more pending SP commands at this stage */
	bp->sp_state = 0;

	bp->port.pmf = 0;

	/* clear pending work in rtnl task */
	bp->sp_rtnl_state = 0;
	smp_mb();

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	if (CNIC_LOADED(bp))
		bnx2x_free_skbs_cnic(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_fp_mem(bp);
	if (CNIC_LOADED(bp))
		bnx2x_free_fp_mem_cnic(bp);

	if (IS_PF(bp)) {
		if (CNIC_LOADED(bp))
			bnx2x_free_mem_cnic(bp);
	}
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;
	bp->cnic_loaded = false;

	/* Clear driver version indication in shmem */
	if (IS_PF(bp) && !BP_NOMCP(bp))
		bnx2x_update_mng_version(bp);

	/* Check if there are pending parity attentions. If there are - set
	 * RECOVERY_IN_PROGRESS.
	 */
	if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
		bnx2x_set_reset_in_progress(bp);

		/* Set RESET_IS_GLOBAL if needed */
		if (global)
			bnx2x_set_reset_global(bp);
	}

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if (IS_PF(bp) &&
	    !bnx2x_clear_pf_load(bp) &&
	    bnx2x_reset_is_done(bp, BP_PATH(bp)))
		bnx2x_disable_close_the_gate(bp);

	DP(NETIF_MSG_IFUP, "Ending NIC unload\n");

	return 0;
}
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	/* If there is no power capability, silently succeed */
	if (!bp->pdev->pm_cap) {
		BNX2X_DEV_INFO("No power capability. Breaking.\n");
		return 0;
	}

	pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
		return -EINVAL;
	}
	return 0;
}
/*
 * net_device service functions
 */
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	u8 cos;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif
		if (!bnx2x_fp_lock_napi(fp))
			return budget;

		for_each_cos_in_tx_queue(fp, cos)
			if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
				bnx2x_tx_int(bp, fp->txdata_ptr[cos]);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget) {
				bnx2x_fp_unlock_napi(fp);
				break;
			}
		}

		bnx2x_fp_unlock_napi(fp);

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {

			/* No need to update SB for FCoE L2 ring as long as
			 * it's connected to the default SB and the SB
			 * has been updated when NAPI was scheduled.
			 */
			if (IS_FCOE_FP(fp)) {
				napi_complete(napi);
				break;
			}
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (bnx2x_update_fpsb_idx)
			 * prior to this check (bnx2x_has_rx_work) so that
			 * we won't write the "newer" value of the status block
			 * to IGU (if there was a DMA right after
			 * bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed
			 * to right before bnx2x_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				DP(NETIF_MSG_RX_STATUS,
				   "Update index to %d\n", fp->fp_hc_idx);
				bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_hc_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
int bnx2x_low_latency_recv(struct napi_struct *napi)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int found = 0;

	if ((bp->state == BNX2X_STATE_CLOSED) ||
	    (bp->state == BNX2X_STATE_ERROR) ||
	    (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO)))
		return LL_FLUSH_FAILED;

	if (!bnx2x_fp_lock_poll(fp))
		return LL_FLUSH_BUSY;

	if (bnx2x_has_rx_work(fp))
		found = bnx2x_rx_int(fp, 4);

	bnx2x_fp_unlock_poll(fp);

	return found;
}
#endif
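
/* Caller-side sketch (core busy-poll path): sk_busy_loop() invokes this
 * ndo_busy_poll hook repeatedly with BHs disabled, so it only trylocks
 * the fastpath and polls a small fixed budget of 4 CQEs per invocation,
 * deferring to the regular NAPI path on any contention.
 */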
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 */
static u16 bnx2x_tx_split(struct bnx2x *bp,
			  struct bnx2x_fp_txdata *txdata,
			  struct sw_tx_bd *tx_buf,
			  struct eth_tx_start_bd **tx_bd, u16 hlen,
			  u16 bd_prod)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
	   h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
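
/* Resulting layout - both BDs share the single DMA mapping made for the
 * linear part of the skb:
 *
 *	mapping              mapping + hlen
 *	|<---- start BD ---->|<----- new data BD ----->|
 *	       hlen                 old_len - hlen
 */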
#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	__sum16 tsum = (__force __sum16) csum;

	if (fix > 0)
		tsum = ~csum_fold(csum_sub((__force __wsum) csum,
				  csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		tsum = ~csum_fold(csum_add((__force __wsum) csum,
				  csum_partial(t_header, -fix, 0)));

	return bswab16(tsum);
}
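
/* Illustration: with fix > 0 the HW began summing 'fix' bytes before the
 * transport header, so the partial sum of those stray bytes is subtracted;
 * with fix < 0 the missed bytes are added back. csum_fold() then collapses
 * the 32-bit running sum and the negation restores the one's-complement
 * checksum expected on the wire.
 */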
static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;
	__u8 prot = 0;
	__be16 protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return XMIT_PLAIN;

	protocol = vlan_get_protocol(skb);
	if (protocol == htons(ETH_P_IPV6)) {
		rc = XMIT_CSUM_V6;
		prot = ipv6_hdr(skb)->nexthdr;
	} else {
		rc = XMIT_CSUM_V4;
		prot = ip_hdr(skb)->protocol;
	}

	if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
		if (inner_ip_hdr(skb)->version == 6) {
			rc |= XMIT_CSUM_ENC_V6;
			if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		} else {
			rc |= XMIT_CSUM_ENC_V4;
			if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}
	if (prot == IPPROTO_TCP)
		rc |= XMIT_CSUM_TCP;

	if (skb_is_gso(skb)) {
		if (skb_is_gso_v6(skb)) {
			rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
			if (rc & XMIT_CSUM_ENC)
				rc |= XMIT_GSO_ENC_V6;
		} else {
			rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
			if (rc & XMIT_CSUM_ENC)
				rc |= XMIT_GSO_ENC_V4;
		}
	}

	return rc;
}
/* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS		4

/* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
#define BNX2X_NUM_TSO_WIN_SUB_BDS		3
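
/* Worked example, assuming MAX_FETCH_BD is 13: a regular TSO packet may
 * spread each MSS-sized window over at most 13 - 3 = 10 data BDs, an
 * encapsulated (e.g. VXLAN) one over only 13 - 4 = 9 because of the extra
 * parsing BD. The check below slides such a window across the frag list
 * and requests linearization if any window holds less than one MSS.
 */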
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
	int to_copy = 0, hlen = 0;

	if (xmit_type & XMIT_GSO_ENC)
		num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;

	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			if (xmit_type & XMIT_GSO_ENC)
				hlen = (int)(skb_inner_transport_header(skb) -
					     skb->data) +
					     inner_tcp_hdrlen(skb);
			else
				hlen = (int)(skb_transport_header(skb) -
					     skb->data) + tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
/**
 * bnx2x_set_pbd_gso - update PBD in GSO case.
 *
 * @skb:	packet skb
 * @pbd:	parse BD
 * @xmit_type:	xmit flags
 */
static void bnx2x_set_pbd_gso(struct sk_buff *skb,
			      struct eth_tx_parse_bd_e1x *pbd,
			      u32 xmit_type)
{
	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
	pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
	pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));

	if (xmit_type & XMIT_GSO_V4) {
		pbd->ip_id = bswab16(ip_hdr(skb)->id);
		pbd->tcp_pseudo_csum =
			bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr,
						   0, IPPROTO_TCP, 0));
	} else {
		pbd->tcp_pseudo_csum =
			bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0));
	}

	pbd->global_data |=
		cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
}
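
/* Note: the pseudo-header checksum above is deliberately seeded with a
 * zero length (and PSEUDO_CS_WITHOUT_LEN set) - the FW patches in the
 * per-segment TCP length while slicing the LSO payload, so the driver
 * must not fold any length into the seed.
 */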
/**
 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
 *
 * @bp:			driver handle
 * @skb:		packet skb
 * @parsing_data:	data to be updated
 * @xmit_type:		xmit flags
 *
 * 57712/578xx related, when skb has encapsulation
 */
static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
				 u32 *parsing_data, u32 xmit_type)
{
	*parsing_data |=
		((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;

	if (xmit_type & XMIT_CSUM_TCP) {
		*parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;

		return skb_inner_transport_header(skb) +
			inner_tcp_hdrlen(skb) - skb->data;
	}

	/* We support checksum offload for TCP and UDP only.
	 * No need to pass the UDP header length - it's a constant.
	 */
	return skb_inner_transport_header(skb) +
		sizeof(struct udphdr) - skb->data;
}
/**
 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
 *
 * @bp:			driver handle
 * @skb:		packet skb
 * @parsing_data:	data to be updated
 * @xmit_type:		xmit flags
 *
 * 57712/578xx related
 */
static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
				u32 *parsing_data, u32 xmit_type)
{
	*parsing_data |=
		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;

	if (xmit_type & XMIT_CSUM_TCP) {
		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;

		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
	}
	/* We support checksum offload for TCP and UDP only.
	 * No need to pass the UDP header length - it's a constant.
	 */
	return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
}
/* set FW indication according to inner or outer protocols if tunneled */
static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
			       struct eth_tx_start_bd *tx_start_bd,
			       u32 xmit_type)
{
	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

	if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

	if (!(xmit_type & XMIT_CSUM_TCP))
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
}
/**
 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
 *
 * @bp:		driver handle
 * @skb:	packet skb
 * @pbd:	parse BD to be updated
 * @xmit_type:	xmit flags
 */
static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
			     struct eth_tx_parse_bd_e1x *pbd,
			     u32 xmit_type)
{
	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;

	/* for now NS flag is not used in Linux */
	pbd->global_data =
		cpu_to_le16(hlen |
			    ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
			     ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));

	pbd->ip_hlen_w = (skb_transport_header(skb) -
			skb_network_header(skb)) >> 1;

	hlen += pbd->ip_hlen_w;

	/* We support checksum offload for TCP and UDP only */
	if (xmit_type & XMIT_CSUM_TCP)
		hlen += tcp_hdrlen(skb) / 2;
	else
		hlen += sizeof(struct udphdr) / 2;

	pbd->total_hlen_w = cpu_to_le16(hlen);
	hlen = hlen*2;

	if (xmit_type & XMIT_CSUM_TCP) {
		pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);

	} else {
		s8 fix = SKB_CS_OFF(skb); /* signed! */

		DP(NETIF_MSG_TX_QUEUED,
		   "hlen %d fix %d csum before fix %x\n",
		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));

		/* HW bug: fixup the CSUM */
		pbd->tcp_pseudo_csum =
			bnx2x_csum_fix(skb_transport_header(skb),
				       SKB_CS(skb), fix);

		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
		   pbd->tcp_pseudo_csum);
	}

	return hlen;
}
static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
				      struct eth_tx_parse_bd_e2 *pbd_e2,
				      struct eth_tx_parse_2nd_bd *pbd2,
				      u16 *global_data,
				      u32 xmit_type)
{
	u16 hlen_w = 0;
	u8 outerip_off, outerip_len = 0;

	/* from outer IP to transport */
	hlen_w = (skb_inner_transport_header(skb) -
		  skb_network_header(skb)) >> 1;

	/* transport len */
	hlen_w += inner_tcp_hdrlen(skb) >> 1;

	pbd2->fw_ip_hdr_to_payload_w = hlen_w;

	/* outer IP header info */
	if (xmit_type & XMIT_CSUM_V4) {
		struct iphdr *iph = ip_hdr(skb);
		u32 csum = (__force u32)(~iph->check) -
			   (__force u32)iph->tot_len -
			   (__force u32)iph->frag_off;

		outerip_len = iph->ihl << 1;

		pbd2->fw_ip_csum_wo_len_flags_frag =
			bswab16(csum_fold((__force __wsum)csum));
	} else {
		pbd2->fw_ip_hdr_to_payload_w =
			hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
		pbd_e2->data.tunnel_data.flags |=
			ETH_TUNNEL_DATA_IPV6_OUTER;
	}

	pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);

	pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));

	/* inner IP header info */
	if (xmit_type & XMIT_CSUM_ENC_V4) {
		pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);

		pbd_e2->data.tunnel_data.pseudo_csum =
			bswab16(~csum_tcpudp_magic(
					inner_ip_hdr(skb)->saddr,
					inner_ip_hdr(skb)->daddr,
					0, IPPROTO_TCP, 0));
	} else {
		pbd_e2->data.tunnel_data.pseudo_csum =
			bswab16(~csum_ipv6_magic(
					&inner_ipv6_hdr(skb)->saddr,
					&inner_ipv6_hdr(skb)->daddr,
					0, IPPROTO_TCP, 0));
	}

	outerip_off = (skb_network_header(skb) - skb->data) >> 1;

	*global_data |=
		outerip_off |
		(outerip_len <<
			ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
		((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
			ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);

	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
		pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
	}
}
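
/* For UDP-based tunnels (e.g. VXLAN) the flag and offset set above let
 * the FW locate the outer UDP header and adjust its length/checksum for
 * every generated segment; IP-protocol tunnels such as GRE skip this
 * branch.
 */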
static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
					 u32 xmit_type)
{
	struct ipv6hdr *ipv6;

	if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
		return;

	if (xmit_type & XMIT_GSO_ENC_V6)
		ipv6 = inner_ipv6_hdr(skb);
	else /* XMIT_GSO_V6 */
		ipv6 = ipv6_hdr(skb);

	if (ipv6->nexthdr == NEXTHDR_IPV6)
		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	struct netdev_queue *txq;
	struct bnx2x_fp_txdata *txdata;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd, *first_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
	struct eth_tx_parse_2nd_bd *pbd2 = NULL;
	u32 pbd_e2_parsing_data = 0;
	u16 pkt_prod, bd_prod;
	int nbd, txq_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	txq_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, txq_index);

	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));

	txdata = &bp->bnx2x_txq[txq_index];

	/* enable this debug print to view the transmission queue being used
	DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
	   txq_index, fp_index, txdata_index); */

	/* enable this debug print to view the transmission details
	DP(NETIF_MSG_TX_QUEUED,
	   "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
	   txdata->cid, fp_index, txdata_index, txdata, fp); */

	if (unlikely(bnx2x_tx_avail(bp, txdata) <
		     skb_shinfo(skb)->nr_frags +
		     BDS_PER_TX_PKT +
		     NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
		/* Handle special storage cases separately */
		if (txdata->tx_ring_size == 0) {
			struct bnx2x_eth_q_stats *q_stats =
				bnx2x_fp_qstats(bp, txdata->parent_fp);
			q_stats->driver_filtered_tx_pkt++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED,
	   "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
	   skb->len);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default)*/
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED,
			   "SKB linearization failed - silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif
	/* Map skb linear data for DMA */
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		DP(NETIF_MSG_TX_QUEUED,
		   "SKB mapping failed - silently dropping this SKB\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all PBD sizes are in words - NOT DWORDS!
	*/

	/* get current pkt produced now - advance it just before sending packet
	 * since mapping of pages may fail and cause packet to be dropped
	 */
	pkt_prod = txdata->tx_pkt_prod;
	bd_prod = TX_BD(txdata->tx_bd_prod);

	/* get a tx_buf and first BD
	 * tx_start_bd may be changed during SPLIT,
	 * but first_bd will always stay first
	 */
	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
	first_bd = tx_start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		if (!(bp->flags & TX_TIMESTAMPING_EN)) {
			BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
		} else if (bp->ptp_tx_skb) {
			BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
		} else {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			/* schedule check for Tx timestamp */
			bp->ptp_tx_skb = skb_get(skb);
			bp->ptp_tx_start = jiffies;
			schedule_work(&bp->ptp_task);
		}
	}

	/* header nbd: indirectly zero other flags! */
	tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;

	/* remember the first BD of the packet */
	tx_buf->first_bd = txdata->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);

	if (skb_vlan_tag_present(skb)) {
		tx_start_bd->vlan_or_ethertype =
		    cpu_to_le16(skb_vlan_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |=
		    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
	} else {
		/* when transmitting in a vf, start bd must hold the ethertype
		 * for fw to enforce it
		 */
		u16 vlan_tci = 0;
#ifndef BNX2X_STOP_ON_ERROR
		if (IS_VF(bp)) {
#endif
			/* Still need to consider inband vlan for enforced */
			if (__vlan_get_tag(skb, &vlan_tci)) {
				tx_start_bd->vlan_or_ethertype =
					cpu_to_le16(ntohs(eth->h_proto));
			} else {
				tx_start_bd->bd_flags.as_bitfield |=
					(X_ETH_INBAND_VLAN <<
					 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
				tx_start_bd->vlan_or_ethertype =
					cpu_to_le16(vlan_tci);
			}
#ifndef BNX2X_STOP_ON_ERROR
		} else {
			/* used by FW for packet accounting */
			tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
		}
#endif
	}

	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	if (xmit_type & XMIT_CSUM)
		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);

	if (!CHIP_IS_E1x(bp)) {
		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));

		if (xmit_type & XMIT_CSUM_ENC) {
			u16 global_data = 0;

			/* Set PBD in enc checksum offload case */
			hlen = bnx2x_set_pbd_csum_enc(bp, skb,
						      &pbd_e2_parsing_data,
						      xmit_type);

			/* turn on 2nd parsing and get a BD */
			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

			pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;

			memset(pbd2, 0, sizeof(*pbd2));

			pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
				(skb_inner_network_header(skb) -
				 skb->data) >> 1;

			if (xmit_type & XMIT_GSO_ENC)
				bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
							  &global_data,
							  xmit_type);

			pbd2->global_data = cpu_to_le16(global_data);

			/* add addition parse BD indication to start BD */
			SET_FLAG(tx_start_bd->general_data,
				 ETH_TX_START_BD_PARSE_NBDS, 1);
			/* set encapsulation flag in start BD */
			SET_FLAG(tx_start_bd->general_data,
				 ETH_TX_START_BD_TUNNEL_EXIST, 1);

			tx_buf->flags |= BNX2X_HAS_SECOND_PBD;

			nbd++;
		} else if (xmit_type & XMIT_CSUM) {
			/* Set PBD in checksum offload case w/o encapsulation */
			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
						     &pbd_e2_parsing_data,
						     xmit_type);
		}

		bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
		/* Add the macs to the parsing BD if this is a vf or if
		 * Tx Switching is enabled.
		 */
		if (IS_VF(bp)) {
			/* override GRE parameters in BD */
			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
					      &pbd_e2->data.mac_addr.src_mid,
					      &pbd_e2->data.mac_addr.src_lo,
					      eth->h_source);

			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
					      &pbd_e2->data.mac_addr.dst_mid,
					      &pbd_e2->data.mac_addr.dst_lo,
					      eth->h_dest);
		} else {
			if (bp->flags & TX_SWITCHING)
				bnx2x_set_fw_mac_addr(
						&pbd_e2->data.mac_addr.dst_hi,
						&pbd_e2->data.mac_addr.dst_mid,
						&pbd_e2->data.mac_addr.dst_lo,
						eth->h_dest);
#ifdef BNX2X_STOP_ON_ERROR
			/* Enforce security is always set in Stop on Error -
			 * source mac should be present in the parsing BD
			 */
			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
					      &pbd_e2->data.mac_addr.src_mid,
					      &pbd_e2->data.mac_addr.src_lo,
					      eth->h_source);
#endif
		}

		SET_FLAG(pbd_e2_parsing_data,
			 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
	} else {
		u16 global_data = 0;
		pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);

		SET_FLAG(global_data,
			 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
		pbd_e1x->global_data |= cpu_to_le16(global_data);
	}

	/* Setup the data pointer of the first BD of the packet */
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED,
	   "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen)) {
			nbd++;
			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
						 &tx_start_bd, hlen,
						 bd_prod);
		}
		if (!CHIP_IS_E1x(bp))
			pbd_e2_parsing_data |=
				(skb_shinfo(skb)->gso_size <<
				 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
				 ETH_TX_PARSE_BD_E2_LSO_MSS;
		else
			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
	}

	/* Set the PBD's parsing_data field if not zero
	 * (for the chips newer than 57711).
	 */
	if (pbd_e2_parsing_data)
		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);

	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	/* Handle fragmented skb */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
			unsigned int pkts_compl = 0, bytes_compl = 0;

			DP(NETIF_MSG_TX_QUEUED,
			   "Unable to map page - dropping packet...\n");

			/* we need to unmap all buffers already mapped
			 * for this SKB;
			 * first_bd->nbd need to be properly updated
			 * before call to bnx2x_free_tx_pkt
			 */
			first_bd->nbd = cpu_to_le16(nbd);
			bnx2x_free_tx_pkt(bp, txdata,
					  TX_BD(txdata->tx_pkt_prod),
					  &pkts_compl, &bytes_compl);
			return NETDEV_TX_OK;
		}

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
		le16_add_cpu(&pkt_size, skb_frag_size(frag));
		nbd++;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	/* update with actual num BDs */
	first_bd->nbd = cpu_to_le16(nbd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	/* total_pkt_bytes should be set on the first data BD if
	 * it's not an LSO packet and there is more than one
	 * data BD. In this case pkt_size is limited by an MTU value.
	 * However we prefer to set it for an LSO packet (while we don't
	 * have to) in order to save some CPU cycles in the non-LSO
	 * case, where we care much more about them.
	 */
	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd_e1x)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
		   le16_to_cpu(pbd_e1x->total_hlen_w));
	if (pbd_e2)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
		   pbd_e2,
		   pbd_e2->data.mac_addr.dst_hi,
		   pbd_e2->data.mac_addr.dst_mid,
		   pbd_e2->data.mac_addr.dst_lo,
		   pbd_e2->data.mac_addr.src_hi,
		   pbd_e2->data.mac_addr.src_mid,
		   pbd_e2->data.mac_addr.src_lo,
		   pbd_e2->parsing_data);
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	netdev_tx_sent_queue(txq, skb->len);

	skb_tx_timestamp(skb);

	txdata->tx_pkt_prod++;
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes packets must have BDs.
	 */
	wmb();

	txdata->tx_db.data.prod += nbd;
	barrier();

	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);

	mmiowb();

	txdata->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons */
		smp_mb();

		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
		if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
			netif_tx_wake_queue(txq);
	}
	txdata->tx_pkt++;

	return NETDEV_TX_OK;
}
void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
{
	int mfw_vn = BP_FW_MB_IDX(bp);
	u32 tmp;

	/* If the shmem shouldn't affect configuration, reflect */
	if (!IS_MF_BD(bp)) {
		int i;

		for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
			c2s_map[i] = i;
		*c2s_default = 0;

		return;
	}

	tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
	c2s_map[0] = tmp & 0xff;
	c2s_map[1] = (tmp >> 8) & 0xff;
	c2s_map[2] = (tmp >> 16) & 0xff;
	c2s_map[3] = (tmp >> 24) & 0xff;

	tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
	c2s_map[4] = tmp & 0xff;
	c2s_map[5] = (tmp >> 8) & 0xff;
	c2s_map[6] = (tmp >> 16) & 0xff;
	c2s_map[7] = (tmp >> 24) & 0xff;

	tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
	*c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
}
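
/* Example: on a little-endian host a c2s_pcp_map_lower word read as
 * 0x03020100 byte-swaps to 0x00010203, giving c2s_map[0..3] =
 * {0x03, 0x02, 0x01, 0x00} - each shmem byte supplies the outer PCP
 * value for one inner priority.
 */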
/**
 * bnx2x_setup_tc - routine to configure net_device for multi tc
 *
 * @dev:	net device to configure
 * @num_tc:	number of traffic classes to enable
 *
 * callback connected to the ndo_setup_tc function pointer
 */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
	int cos, prio, count, offset;

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	/* no traffic classes requested. Aborting */
	if (!num_tc) {
		netdev_reset_tc(dev);
		return 0;
	}

	/* requested to support too many traffic classes */
	if (num_tc > bp->max_cos) {
		BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
			  num_tc, bp->max_cos);
		return -EINVAL;
	}

	/* declare amount of supported traffic classes */
	if (netdev_set_num_tc(dev, num_tc)) {
		BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
		return -EINVAL;
	}

	bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);

	/* configure priority to traffic class mapping */
	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
		int outer_prio = c2s_map[prio];

		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
		   "mapping priority %d to tc %d\n",
		   outer_prio, bp->prio_to_cos[outer_prio]);
	}

	/* Use this configuration to differentiate tc0 from other COSes
	   This can be used for ets or pfc, and save the effort of setting
	   up a multi class queue disc or negotiating DCBX with a switch
	netdev_set_prio_tc_map(dev, 0, 0);
	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
	for (prio = 1; prio < 16; prio++) {
		netdev_set_prio_tc_map(dev, prio, 1);
		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
	} */

	/* configure traffic class to transmission queue mapping */
	for (cos = 0; cos < bp->max_cos; cos++) {
		count = BNX2X_NUM_ETH_QUEUES(bp);
		offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
		netdev_set_tc_queue(dev, cos, count, offset);
		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
		   "mapping tc %d to offset %d count %d\n",
		   cos, offset, count);
	}

	return 0;
}
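
/* Example: with 8 ETH queues and num_tc = 3 the loop above maps
 * tc0 -> txqs 0-7, tc1 -> txqs 8-15 and tc2 -> txqs 16-23, matching the
 * cos * num-of-eth-queues + index txdata layout set up in bnx2x_bz_fp().
 */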
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (!is_valid_ether_addr(addr->sa_data)) {
		BNX2X_ERR("Requested MAC address is not valid\n");
		return -EINVAL;
	}

	if (IS_MF_STORAGE_ONLY(bp)) {
		BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
		return -EINVAL;
	}

	if (netif_running(dev)) {
		rc = bnx2x_set_eth_mac(bp, false);
		if (rc)
			return rc;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (netif_running(dev))
		rc = bnx2x_set_eth_mac(bp, true);

	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);

	return rc;
}
static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{
	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
	u8 cos;

	/* Common */
	if (IS_FCOE_IDX(fp_index)) {
		memset(sb, 0, sizeof(union host_hc_status_block));
		fp->status_blk_mapping = 0;
	} else {
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_FREE(sb->e2_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(sb->e1x_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
	}

	/* Rx */
	if (!skip_rx_queue(bp, fp_index)) {
		bnx2x_free_rx_bds(fp);

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	if (!skip_tx_queue(bp, fp_index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFDOWN,
			   "freeing tx memory of fp %d cos %d cid %d\n",
			   fp_index, cos, txdata->cid);

			BNX2X_FREE(txdata->tx_buf_ring);
			BNX2X_PCI_FREE(txdata->tx_desc_ring,
				       txdata->tx_desc_mapping,
				       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}
	/* end of fastpath */
}
static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
{
	int i;

	for_each_cnic_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}

void bnx2x_free_fp_mem(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}

static void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);

	if (!CHIP_IS_E1x(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}

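/* Note (explanatory, not from the original source): the pointers cached
 * above let the fast path read the DMA-updated status-block indices
 * through fp->sb_index_values / fp->sb_running_index directly, instead of
 * re-deciding between the E1x and E2 status-block layouts on every poll.
 */
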
/* Returns the number of actually allocated BDs */
static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
			      int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i, failure_cnt = 0;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;

	/* This routine is called only during fp init so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
			failure_cnt++;
			continue;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= (i - failure_cnt));
	}

	if (failure_cnt)
		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
			  i - failure_cnt, fp->index);

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
				 cqe_ring_prod);
	fp->rx_pkt = fp->rx_calls = 0;

	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;

	return i - failure_cnt;
}

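/* Worked example (illustrative): with rx_ring_size == 4096 and three
 * failed buffer allocations, the producers advance 4093 times and the
 * function returns 4093. The caller, bnx2x_alloc_fp_mem_at() below,
 * compares the return value against the requested size and falls into its
 * low-memory handling whenever the ring came up short.
 */
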
static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}

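/* Explanatory note (not from the original source): the last CQE of each
 * CQ page is not a completion but a next-page pointer, and the
 * (i % NUM_RCQ_RINGS) arithmetic above wraps the final page back to the
 * first, so the pages form a circular chain:
 *
 *	page 0 -> page 1 -> ... -> page (NUM_RCQ_RINGS - 1) -> page 0
 */
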
static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;
	u8 cos;
	int rx_ring_size = 0;

	if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
		rx_ring_size = MIN_RX_SIZE_NONTPA;
		bp->rx_ring_size = rx_ring_size;
	} else if (!bp->rx_ring_size) {
		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);

		if (CHIP_IS_E3(bp)) {
			u32 cfg = SHMEM_RD(bp,
					   dev_info.port_hw_config[BP_PORT(bp)].
					   default_cfg);

			/* Decrease ring size for 1G functions */
			if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
			    PORT_HW_CFG_NET_SERDES_IF_SGMII)
				rx_ring_size /= 10;
		}

		/* allocate at least number of buffers required by FW */
		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
				     MIN_RX_SIZE_TPA, rx_ring_size);

		bp->rx_ring_size = rx_ring_size;
	} else /* if rx_ring_size specified - use it */
		rx_ring_size = bp->rx_ring_size;

	DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);

	/* Common */
	sb = &bnx2x_fp(bp, index, status_blk);

	if (!IS_FCOE_IDX(index)) {
		/* status blocks */
		if (!CHIP_IS_E1x(bp)) {
			sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
						    sizeof(struct host_hc_status_block_e2));
			if (!sb->e2_sb)
				goto alloc_mem_err;
		} else {
			sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
						     sizeof(struct host_hc_status_block_e1x));
			if (!sb->e1x_sb)
				goto alloc_mem_err;
		}
	}

	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
	 * set shortcuts for it.
	 */
	if (!IS_FCOE_IDX(index))
		set_sb_shortcuts(bp, index);

	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFUP,
			   "allocating tx memory of fp %d cos %d\n",
			   index, cos);

			txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
						      sizeof(struct sw_tx_bd),
						      GFP_KERNEL);
			if (!txdata->tx_buf_ring)
				goto alloc_mem_err;
			txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
							       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
			if (!txdata->tx_desc_ring)
				goto alloc_mem_err;
		}
	}

	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		bnx2x_fp(bp, index, rx_buf_ring) =
			kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_buf_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_desc_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
					sizeof(struct eth_rx_bd) * NUM_RX_BD);
		if (!bnx2x_fp(bp, index, rx_desc_ring))
			goto alloc_mem_err;

		/* Seed all CQEs by 1s */
		bnx2x_fp(bp, index, rx_comp_ring) =
			BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
					 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
		if (!bnx2x_fp(bp, index, rx_comp_ring))
			goto alloc_mem_err;

		/* SGE ring */
		bnx2x_fp(bp, index, rx_page_ring) =
			kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
				GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_page_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_sge_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
					BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
		if (!bnx2x_fp(bp, index, rx_sge_ring))
			goto alloc_mem_err;

		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;

/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if the queue is not big enough; in that
	 * case we disable the queue. The minimum size differs for OOO, TPA
	 * and non-TPA queues.
	 */
	if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
			 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
		/* release memory allocated for this queue */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;
	}
	return 0;
}

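/* Sizing example (illustrative): with no user-specified ring size and,
 * say, 8 Rx queues, each queue starts from MAX_RX_AVAIL / 8 BDs; an E3
 * port wired as SGMII (1G) then divides that by 10, and the result is
 * finally raised back to the firmware minimum (MIN_RX_SIZE_TPA or
 * MIN_RX_SIZE_NONTPA) if it fell below it.
 */
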
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{
	/* FCoE */
	if (!NO_FCOE(bp))
		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
			/* we will fail the load process instead of marking
			 * NO_FCOE_FLAG
			 */
			return -ENOMEM;

	return 0;
}

static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/* 1. Allocate FP for leading - fatal if error
	 * 2. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

		WARN_ON(delta < 0);
		bnx2x_shrink_eth_fp(bp, delta);
		if (CNIC_SUPPORT(bp))
			/* move non-eth FPs next to the last eth FP; must be
			 * done in that order: FCOE_IDX < FWD_IDX < OOO_IDX
			 */

			/* move the FCoE fp even if NO_FCOE_FLAG is on */
			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
		bp->num_ethernet_queues -= delta;
		bp->num_queues = bp->num_ethernet_queues +
				 bp->num_cnic_queues;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}

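/* Example of the shrink path (illustrative): if 8 ETH queues were
 * requested but allocation first failed at i == 5, then delta == 3; the
 * surviving ETH fastpaths are compacted into indices 0..4 and the FCoE
 * fastpath is moved down by 3 so it stays adjacent to the last ETH queue.
 */
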
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < bp->fp_array_size; i++)
		kfree(bp->fp[i].tpa_info);
	kfree(bp->fp);
	kfree(bp->sp_objs);
	kfree(bp->fp_stats);
	kfree(bp->bnx2x_txq);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}

int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	/*
	 * The biggest MSI-X table we might need is the maximum number of
	 * fast path IGU SBs plus the default SB (for PF only).
	 */
	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);

	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	bp->fp_array_size = fp_array_size;
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);

	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < bp->fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}

	bp->fp = fp;

	/* allocate sp objs */
	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	/* allocate fp_stats */
	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;

	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);

	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}

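/* Sizing note (illustrative): on a PF with igu_sb_cnt == 16 the MSI-X
 * table above gets 17 entries (one per fastpath SB plus the default SB),
 * while the fp array is sized for the RSS queues plus one optional CNIC
 * L2 queue. Any partial failure unwinds through bnx2x_free_mem_bp(),
 * which tolerates NULL pointers via kfree().
 */
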
int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}

int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;

	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);

	/*
	 * The selected activated PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to
	 * reverse the configuration.
	 */
	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}

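/* Example (illustrative): with PHY swapping enabled, a link that is
 * active on EXT_PHY1 was actually set up from the EXT_PHY2 section of the
 * shmem configuration, so the index is reversed before LINK_CONFIG_IDX()
 * selects the per-PHY configuration slot.
 */
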
#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif

/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		DP(BNX2X_MSG_IOV, "VFs are enabled, cannot change MTU\n");
		return -EPERM;
	}

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't change MTU during parity recovery\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		BNX2X_ERR("Can't support requested MTU size\n");
		return -EINVAL;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);

	return bnx2x_reload_if_running(dev);
}

netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		netdev_features_t changed = dev->features ^ features;

		/* Revert the requested changes in features if they
		 * would require internal reload of PF in bnx2x_set_features().
		 */
		if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
			features &= ~NETIF_F_RXCSUM;
			features |= dev->features & NETIF_F_RXCSUM;
		}

		if (changed & NETIF_F_LOOPBACK) {
			features &= ~NETIF_F_LOOPBACK;
			features |= dev->features & NETIF_F_LOOPBACK;
		}
	}

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM)) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	return features;
}

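/* Example (illustrative): a request such as "ethtool -K eth0 rx off"
 * reaches this hook with NETIF_F_RXCSUM cleared; because TPA depends on
 * hardware Rx checksum validation, LRO and GRO are masked out of the
 * feature set as well before it is handed back to the stack.
 */
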
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	netdev_features_t changes = features ^ dev->features;
	bool bnx2x_reload = false;
	int rc;

	/* VFs or non SRIOV PFs should be able to change loopback feature */
	if (!pci_num_vf(bp->pdev)) {
		if (features & NETIF_F_LOOPBACK) {
			if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
				bp->link_params.loopback_mode = LOOPBACK_BMAC;
				bnx2x_reload = true;
			}
		} else {
			if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
				bp->link_params.loopback_mode = LOOPBACK_NONE;
				bnx2x_reload = true;
			}
		}
	}

	/* if GRO is changed while LRO is enabled, don't force a reload */
	if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
		changes &= ~NETIF_F_GRO;

	/* if GRO is changed while HW TPA is off, don't force a reload */
	if ((changes & NETIF_F_GRO) && bp->disable_tpa)
		changes &= ~NETIF_F_GRO;

	if (changes)
		bnx2x_reload = true;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
			dev->features = features;
			rc = bnx2x_reload_if_running(dev);
			if (rc)
				return rc;
		}
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}

void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}

int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);

	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);

	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	/* a zero timeout also means "disable coalescing on this index" */
	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}

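/* Usage sketch (illustrative; the sb_index value is an assumption picked
 * from the driver's HC index definitions): re-arm the Rx CQ consumer
 * index of a fastpath status block to interrupt at most once every
 * 100 usec. BNX2X_BTR is the host-coalescing timer resolution, so the
 * timeout is programmed as 100 / BNX2X_BTR ticks:
 *
 *	bnx2x_update_coalesce_sb_index(bp, fp->fw_sb_id,
 *				       HC_INDEX_ETH_RX_CQ_CONS, 0, 100);
 */
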
void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
			    u32 verbose)
{
	smp_mb__before_atomic();
	set_bit(flag, &bp->sp_rtnl_state);
	smp_mb__after_atomic();
	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
	   flag);
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
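
/* Note (explanatory, not from the original source): the paired barriers
 * above ensure the flag set in sp_rtnl_state is visible to the slowpath
 * worker before it can run on another CPU. A typical caller is the Tx
 * timeout path earlier in this file:
 *
 *	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
 */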