1 /* bnx2x_cmn.c: QLogic Everest network driver.
3 * Copyright (c) 2007-2013 Broadcom Corporation
4 * Copyright (c) 2014 QLogic Corporation
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
11 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12 * Written by: Eliezer Tamir
13 * Based on code from Michael Chan's bnx2 driver
14 * UDP CSUM errata workaround by Arik Gendelman
15 * Slowpath and fastpath rework by Vladislav Zolotarov
16 * Statistics and Link management by Yitchak Gertner
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 #include <linux/etherdevice.h>
23 #include <linux/if_vlan.h>
24 #include <linux/interrupt.h>
26 #include <linux/crash_dump.h>
29 #include <net/ip6_checksum.h>
30 #include <net/busy_poll.h>
31 #include <linux/prefetch.h>
32 #include "bnx2x_cmn.h"
33 #include "bnx2x_init.h"
36 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
37 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
38 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
39 static int bnx2x_poll(struct napi_struct *napi, int budget);
41 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
45 /* Add NAPI objects */
46 for_each_rx_queue_cnic(bp, i) {
47 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
48 bnx2x_poll, NAPI_POLL_WEIGHT);
52 static void bnx2x_add_all_napi(struct bnx2x *bp)
56 /* Add NAPI objects */
57 for_each_eth_queue(bp, i) {
58 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
59 bnx2x_poll, NAPI_POLL_WEIGHT);
63 static int bnx2x_calc_num_queues(struct bnx2x *bp)
65 int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
67 /* Reduce memory usage in kdump environment by using only one queue */
68 if (is_kdump_kernel())
71 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
76 * bnx2x_move_fp - move content of the fastpath structure.
79 * @from: source FP index
80 * @to: destination FP index
82 * Makes sure the contents of the bp->fp[to].napi are kept
83 * intact. This is done by first copying the napi struct from
84 * the target to the source, and then mem copying the entire
85 * source onto the target. Update txdata pointers and related content.
88 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
90 struct bnx2x_fastpath *from_fp = &bp->fp[from];
91 struct bnx2x_fastpath *to_fp = &bp->fp[to];
92 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
93 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
94 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
95 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
96 int old_max_eth_txqs, new_max_eth_txqs;
97 int old_txdata_index = 0, new_txdata_index = 0;
98 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
100 /* Copy the NAPI object as it has been already initialized */
101 from_fp->napi = to_fp->napi;
103 /* Move bnx2x_fastpath contents */
104 memcpy(to_fp, from_fp, sizeof(*to_fp));
107 /* Retain the tpa_info of the original `to' version as we don't want
108 * 2 FPs to contain the same tpa_info pointer.
110 to_fp->tpa_info = old_tpa_info;
112 /* move sp_objs contents as well, as their indices match fp ones */
113 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
115 /* move fp_stats contents as well, as their indices match fp ones */
116 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
118 /* Update txdata pointers in fp and move txdata content accordingly:
119 * Each fp consumes 'max_cos' txdata structures, so the index should be
120 * decremented by max_cos x delta.
123 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
124 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
126 if (from == FCOE_IDX(bp)) {
127 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
128 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
131 memcpy(&bp->bnx2x_txq[new_txdata_index],
132 &bp->bnx2x_txq[old_txdata_index],
133 sizeof(struct bnx2x_fp_txdata));
134 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
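/* Illustrative sketch (not driver code, names invented): the field-preserving
 * struct move used by bnx2x_move_fp() above, reduced to its essence. A live
 * member of the destination (like 'napi' or 'tpa_info') is saved before the
 * blanket memcpy() and put back afterwards, so the wholesale copy cannot
 * clobber it.
 */
struct example_slot {
	void *keep;		/* must survive the move */
	int payload[8];		/* everything else is moved wholesale */
};

static inline void example_move_preserving(struct example_slot *from,
					   struct example_slot *to)
{
	void *keep = to->keep;		/* remember the live member */

	memcpy(to, from, sizeof(*to));	/* blanket move of the struct */
	to->keep = keep;		/* restore the preserved member */
}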
138 * bnx2x_fill_fw_str - Fill buffer with FW version string.
141 * @buf: character buffer to fill with the fw name
142 * @buf_len: length of the above buffer
145 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
148 u8 phy_fw_ver[PHY_FW_VER_LEN];
150 phy_fw_ver[0] = '\0';
151 bnx2x_get_ext_phy_fw_version(&bp->link_params,
152 phy_fw_ver, PHY_FW_VER_LEN);
153 strlcpy(buf, bp->fw_ver, buf_len);
154 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
156 (bp->common.bc_ver & 0xff0000) >> 16,
157 (bp->common.bc_ver & 0xff00) >> 8,
158 (bp->common.bc_ver & 0xff),
159 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
161 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
166 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
169 * @delta: number of eth queues which were not allocated
171 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
173 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
175 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
176 * backward along the array could cause memory to be overwritten
178 for (cos = 1; cos < bp->max_cos; cos++) {
179 for (i = 0; i < old_eth_num - delta; i++) {
180 struct bnx2x_fastpath *fp = &bp->fp[i];
181 int new_idx = cos * (old_eth_num - delta) + i;
183 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
184 sizeof(struct bnx2x_fp_txdata));
185 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
190 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
192 /* free skb in the packet ring at pos idx
193 * return idx of last bd freed
195 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
196 u16 idx, unsigned int *pkts_compl,
197 unsigned int *bytes_compl)
199 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
200 struct eth_tx_start_bd *tx_start_bd;
201 struct eth_tx_bd *tx_data_bd;
202 struct sk_buff *skb = tx_buf->skb;
203 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
205 u16 split_bd_len = 0;
207 /* prefetch skb end pointer to speed up dev_kfree_skb() */
210 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
211 txdata->txq_index, idx, tx_buf, skb);
213 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
215 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
216 #ifdef BNX2X_STOP_ON_ERROR
217 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
218 BNX2X_ERR("BAD nbd!\n");
222 new_cons = nbd + tx_buf->first_bd;
224 /* Get the next bd */
225 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
227 /* Skip a parse bd... */
229 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
231 if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
232 /* Skip second parse bd... */
234 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
237 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
238 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
239 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
240 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
242 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
246 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
247 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
253 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
254 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
255 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
257 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
264 (*bytes_compl) += skb->len;
265 dev_kfree_skb_any(skb);
268 tx_buf->first_bd = 0;
274 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
276 struct netdev_queue *txq;
277 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
278 unsigned int pkts_compl = 0, bytes_compl = 0;
280 #ifdef BNX2X_STOP_ON_ERROR
281 if (unlikely(bp->panic))
285 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
286 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
287 sw_cons = txdata->tx_pkt_cons;
289 /* Ensure subsequent loads occur after hw_cons */
292 while (sw_cons != hw_cons) {
295 pkt_cons = TX_BD(sw_cons);
297 DP(NETIF_MSG_TX_DONE,
298 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
299 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
301 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
302 &pkts_compl, &bytes_compl);
307 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
309 txdata->tx_pkt_cons = sw_cons;
310 txdata->tx_bd_cons = bd_cons;
312 /* Need to make the tx_bd_cons update visible to start_xmit()
313 * before checking for netif_tx_queue_stopped(). Without the
314 * memory barrier, there is a small possibility that
315 * start_xmit() will miss it and cause the queue to be stopped forever.
317 * On the other hand we need an rmb() here to ensure the proper
318 * ordering of bit testing in the following
319 * netif_tx_queue_stopped(txq) call.
323 if (unlikely(netif_tx_queue_stopped(txq))) {
324 /* Taking tx_lock() is needed to prevent re-enabling the queue
325 * while it's empty. This could have happened if rx_action() gets
326 * suspended in bnx2x_tx_int() after the condition before
327 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
329 * stops the queue->sees fresh tx_bd_cons->releases the queue->
330 * sends some packets consuming the whole queue again-> stops the queue.
334 __netif_tx_lock(txq, smp_processor_id());
336 if ((netif_tx_queue_stopped(txq)) &&
337 (bp->state == BNX2X_STATE_OPEN) &&
338 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
339 netif_tx_wake_queue(txq);
341 __netif_tx_unlock(txq);
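/* Illustrative sketch (not driver code): the lost-wakeup avoidance pattern
 * from bnx2x_tx_int() above, distilled. The completion path publishes its
 * progress with a barrier, peeks at the stopped state without the lock, and
 * only wakes the queue after re-checking both the state and the free space
 * under the tx lock. 'avail' and 'needed' are invented stand-ins for the
 * driver's bnx2x_tx_avail() and MAX_DESC_PER_TX_PKT.
 */
static void example_tx_completion_wake(struct net_device *dev,
				       unsigned int qidx,
				       int avail, int needed)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qidx);

	smp_mb(); /* publish the new consumer index before testing 'stopped' */

	if (unlikely(netif_tx_queue_stopped(txq))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) && avail >= needed)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}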
346 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
349 u16 last_max = fp->last_max_sge;
351 if (SUB_S16(idx, last_max) > 0)
352 fp->last_max_sge = idx;
355 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
357 struct eth_end_agg_rx_cqe *cqe)
359 struct bnx2x *bp = fp->bp;
360 u16 last_max, last_elem, first_elem;
367 /* First mark all used pages */
368 for (i = 0; i < sge_len; i++)
369 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
370 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
372 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
373 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
375 /* Here we assume that the last SGE index is the biggest */
376 prefetch((void *)(fp->sge_mask));
377 bnx2x_update_last_max_sge(fp,
378 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
380 last_max = RX_SGE(fp->last_max_sge);
381 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
382 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
384 /* If ring is not full */
385 if (last_elem + 1 != first_elem)
388 /* Now update the prod */
389 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
390 if (likely(fp->sge_mask[i]))
393 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
394 delta += BIT_VEC64_ELEM_SZ;
398 fp->rx_sge_prod += delta;
399 /* clear page-end entries */
400 bnx2x_clear_sge_mask_next_elems(fp);
403 DP(NETIF_MSG_RX_STATUS,
404 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
405 fp->last_max_sge, fp->rx_sge_prod);
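/* Illustrative sketch (not driver code, sizes invented): the bit-vector
 * bookkeeping behind the SGE producer update above. Consumed entries have
 * their bits cleared; the producer may only advance across elements whose
 * bits are all clear, re-arming each one to all-ones as it passes.
 */
#define EX_ELEM_SZ	64
#define EX_ELEMS	4

static unsigned int example_sge_advance(u64 mask[EX_ELEMS],
					unsigned int first_elem,
					unsigned int last_elem)
{
	unsigned int i, delta = 0;

	for (i = first_elem; i != last_elem; i = (i + 1) % EX_ELEMS) {
		if (mask[i])		/* element not fully consumed yet */
			break;
		mask[i] = ~0ULL;	/* re-arm all 64 bits */
		delta += EX_ELEM_SZ;	/* producer moves a whole element */
	}
	return delta;			/* add this to the producer index */
}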
408 /* Get Toeplitz hash value in the skb using the value from the
409 * CQE (calculated by HW).
411 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
412 const struct eth_fast_path_rx_cqe *cqe,
413 enum pkt_hash_types *rxhash_type)
415 /* Get Toeplitz hash from CQE */
416 if ((bp->dev->features & NETIF_F_RXHASH) &&
417 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
418 enum eth_rss_hash_type htype;
420 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
421 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
422 (htype == TCP_IPV6_HASH_TYPE)) ?
423 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
425 return le32_to_cpu(cqe->rss_hash_result);
427 *rxhash_type = PKT_HASH_TYPE_NONE;
431 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
433 struct eth_fast_path_rx_cqe *cqe)
435 struct bnx2x *bp = fp->bp;
436 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
437 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
438 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
440 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
441 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
443 /* print error if current state != stop */
444 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
445 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
447 /* Try to map an empty data buffer from the aggregation info */
448 mapping = dma_map_single(&bp->pdev->dev,
449 first_buf->data + NET_SKB_PAD,
450 fp->rx_buf_size, DMA_FROM_DEVICE);
452 * ...if it fails - move the skb from the consumer to the producer
453 * and set the current aggregation state as ERROR to drop it
454 * when TPA_STOP arrives.
457 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
458 /* Move the BD from the consumer to the producer */
459 bnx2x_reuse_rx_data(fp, cons, prod);
460 tpa_info->tpa_state = BNX2X_TPA_ERROR;
464 /* move empty data from pool to prod */
465 prod_rx_buf->data = first_buf->data;
466 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
467 /* point prod_bd to new data */
468 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
469 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
471 /* move partial skb from cons to pool (don't unmap yet) */
472 *first_buf = *cons_rx_buf;
474 /* mark bin state as START */
475 tpa_info->parsing_flags =
476 le16_to_cpu(cqe->pars_flags.flags);
477 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
478 tpa_info->tpa_state = BNX2X_TPA_START;
479 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
480 tpa_info->placement_offset = cqe->placement_offset;
481 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
482 if (fp->mode == TPA_MODE_GRO) {
483 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
484 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
485 tpa_info->gro_size = gro_size;
488 #ifdef BNX2X_STOP_ON_ERROR
489 fp->tpa_queue_used |= (1 << queue);
490 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
495 /* Timestamp option length allowed for TPA aggregation:
497 * nop nop kind length echo val
499 #define TPA_TSTAMP_OPT_LEN 12
501 * bnx2x_set_gro_params - compute GRO values
504 * @parsing_flags: parsing flags from the START CQE
505 * @len_on_bd: total length of the first packet for the aggregation
507 * @pkt_len: length of all segments
509 * Computes an approximate MSS value for this aggregation using
510 * its first packet.
511 * Also computes the number of aggregated segments and the gso_type.
513 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
514 u16 len_on_bd, unsigned int pkt_len,
515 u16 num_of_coalesced_segs)
517 /* TPA aggregation won't have either IP options or TCP options
518 * other than timestamp or IPv6 extension headers.
520 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
522 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
523 PRS_FLAG_OVERETH_IPV6) {
524 hdrs_len += sizeof(struct ipv6hdr);
525 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
527 hdrs_len += sizeof(struct iphdr);
528 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
531 /* Check if there was a TCP timestamp; if there was, it will
532 * always be 12 bytes long: nop nop kind length echo val.
534 * Otherwise the FW would close the aggregation.
536 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
537 hdrs_len += TPA_TSTAMP_OPT_LEN;
539 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
541 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
542 * to skb_shinfo(skb)->gso_segs
544 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
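/* Worked example for the gso_size computation above (typical values, for
 * illustration only): an IPv4 aggregation with TCP timestamps has
 * hdrs_len = 14 (ETH_HLEN) + 20 (iphdr) + 20 (tcphdr) + 12 (TPA_TSTAMP_OPT_LEN)
 *          = 66 bytes,
 * so a first packet with len_on_bd = 1514 yields gso_size = 1514 - 66 = 1448.
 */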
547 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
548 u16 index, gfp_t gfp_mask)
550 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
551 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
552 struct bnx2x_alloc_pool *pool = &fp->page_pool;
555 if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
557 /* put page reference used by the memory pool, since we
558 * won't be using this page as the mempool anymore.
561 put_page(pool->page);
563 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
564 if (unlikely(!pool->page))
570 mapping = dma_map_page(&bp->pdev->dev, pool->page,
571 pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
572 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
573 BNX2X_ERR("Can't map sge\n");
577 get_page(pool->page);
578 sw_buf->page = pool->page;
579 sw_buf->offset = pool->offset;
581 dma_unmap_addr_set(sw_buf, mapping, mapping);
583 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
584 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
586 pool->offset += SGE_PAGE_SIZE;
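/* Illustrative sketch (not driver code, names invented): the page-pool
 * carving used by bnx2x_alloc_rx_sge() above. One (possibly higher-order)
 * page is sliced into fixed-size chunks; every chunk handed out takes its
 * own page reference, and the pool drops its reference once the page can no
 * longer yield a full chunk.
 */
struct example_page_pool {
	struct page *page;
	unsigned int offset;
};

static struct page *example_pool_get_chunk(struct example_page_pool *pool,
					   unsigned int chunk_sz,
					   unsigned int order, gfp_t gfp,
					   unsigned int *offset)
{
	if (!pool->page ||
	    (PAGE_SIZE << order) - pool->offset < chunk_sz) {
		if (pool->page)
			put_page(pool->page);	/* drop the pool's reference */
		pool->page = alloc_pages(gfp, order);
		if (unlikely(!pool->page))
			return NULL;
		pool->offset = 0;
	}

	get_page(pool->page);	/* reference owned by the returned chunk */
	*offset = pool->offset;
	pool->offset += chunk_sz;
	return pool->page;
}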
591 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
592 struct bnx2x_agg_info *tpa_info,
595 struct eth_end_agg_rx_cqe *cqe,
598 struct sw_rx_page *rx_pg, old_rx_pg;
599 u32 i, frag_len, frag_size;
600 int err, j, frag_id = 0;
601 u16 len_on_bd = tpa_info->len_on_bd;
602 u16 full_page = 0, gro_size = 0;
604 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
606 if (fp->mode == TPA_MODE_GRO) {
607 gro_size = tpa_info->gro_size;
608 full_page = tpa_info->full_page;
611 /* This is needed in order to enable forwarding support */
613 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
614 le16_to_cpu(cqe->pkt_len),
615 le16_to_cpu(cqe->num_of_coalesced_segs));
617 #ifdef BNX2X_STOP_ON_ERROR
618 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
619 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
621 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
627 /* Run through the SGL and compose the fragmented skb */
628 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
629 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
631 /* FW gives the indices of the SGE as if the ring is an array
632 (meaning that "next" element will consume 2 indices) */
633 if (fp->mode == TPA_MODE_GRO)
634 frag_len = min_t(u32, frag_size, (u32)full_page);
636 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
638 rx_pg = &fp->rx_page_ring[sge_idx];
641 /* If we fail to allocate a substitute page, we simply stop
642 where we are and drop the whole packet */
643 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
645 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
649 dma_unmap_page(&bp->pdev->dev,
650 dma_unmap_addr(&old_rx_pg, mapping),
651 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
652 /* Add one frag and update the appropriate fields in the skb */
653 if (fp->mode == TPA_MODE_LRO)
654 skb_fill_page_desc(skb, j, old_rx_pg.page,
655 old_rx_pg.offset, frag_len);
659 for (rem = frag_len; rem > 0; rem -= gro_size) {
660 int len = rem > gro_size ? gro_size : rem;
661 skb_fill_page_desc(skb, frag_id++,
663 old_rx_pg.offset + offset,
666 get_page(old_rx_pg.page);
671 skb->data_len += frag_len;
672 skb->truesize += SGE_PAGES;
673 skb->len += frag_len;
675 frag_size -= frag_len;
681 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
683 if (fp->rx_frag_size)
689 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
691 if (fp->rx_frag_size) {
692 /* GFP_KERNEL allocations are used only during initialization */
693 if (unlikely(gfpflags_allow_blocking(gfp_mask)))
694 return (void *)__get_free_page(gfp_mask);
696 return netdev_alloc_frag(fp->rx_frag_size);
699 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
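/* Note (sketch of the rationale): fp->rx_frag_size is chosen by
 * bnx2x_set_rx_buf_size() later in this file - it is non-zero only when the
 * buffer plus NET_SKB_PAD fits in a page, in which case the cheap page-
 * fragment allocator above is used; otherwise the driver falls back to
 * kmalloc() with explicit NET_SKB_PAD headroom.
 */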
703 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
705 const struct iphdr *iph = ip_hdr(skb);
708 skb_set_transport_header(skb, sizeof(struct iphdr));
711 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
712 iph->saddr, iph->daddr, 0);
715 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
717 struct ipv6hdr *iph = ipv6_hdr(skb);
720 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
723 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
724 &iph->saddr, &iph->daddr, 0);
727 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
728 void (*gro_func)(struct bnx2x*, struct sk_buff*))
730 skb_set_network_header(skb, 0);
732 tcp_gro_complete(skb);
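/* Note on the ~tcp_v4_check()/~tcp_v6_check() seeding above (a sketch of the
 * reasoning, not a statement of new driver behaviour): tcp_gro_complete()
 * hands the skb onward as CHECKSUM_PARTIAL, where the final checksummer
 * folds the sum over the TCP segment into whatever th->check already holds.
 * Pre-seeding th->check with the complemented pseudo-header checksum (length,
 * saddr, daddr, zero base) therefore produces a correct final TCP checksum.
 */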
736 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
740 if (skb_shinfo(skb)->gso_size) {
741 switch (be16_to_cpu(skb->protocol)) {
743 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
746 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
749 WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
750 be16_to_cpu(skb->protocol));
754 skb_record_rx_queue(skb, fp->rx_queue);
755 napi_gro_receive(&fp->napi, skb);
758 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
759 struct bnx2x_agg_info *tpa_info,
761 struct eth_end_agg_rx_cqe *cqe,
764 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
765 u8 pad = tpa_info->placement_offset;
766 u16 len = tpa_info->len_on_bd;
767 struct sk_buff *skb = NULL;
768 u8 *new_data, *data = rx_buf->data;
769 u8 old_tpa_state = tpa_info->tpa_state;
771 tpa_info->tpa_state = BNX2X_TPA_STOP;
773 /* If there was an error during the handling of the TPA_START -
774 * drop this aggregation.
776 if (old_tpa_state == BNX2X_TPA_ERROR)
779 /* Try to allocate the new data */
780 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
781 /* Unmap skb in the pool anyway, as we are going to change
782 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails.
784 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
785 fp->rx_buf_size, DMA_FROM_DEVICE);
786 if (likely(new_data))
787 skb = build_skb(data, fp->rx_frag_size);
790 #ifdef BNX2X_STOP_ON_ERROR
791 if (pad + len > fp->rx_buf_size) {
792 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
793 pad, len, fp->rx_buf_size);
799 skb_reserve(skb, pad + NET_SKB_PAD);
801 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
803 skb->protocol = eth_type_trans(skb, bp->dev);
804 skb->ip_summed = CHECKSUM_UNNECESSARY;
806 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
807 skb, cqe, cqe_idx)) {
808 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
809 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
810 bnx2x_gro_receive(bp, fp, skb);
812 DP(NETIF_MSG_RX_STATUS,
813 "Failed to allocate new pages - dropping packet!\n");
814 dev_kfree_skb_any(skb);
817 /* put new data in bin */
818 rx_buf->data = new_data;
823 bnx2x_frag_free(fp, new_data);
825 /* drop the packet and keep the buffer in the bin */
826 DP(NETIF_MSG_RX_STATUS,
827 "Failed to allocate or map a new skb - dropping packet!\n");
828 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
831 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
832 u16 index, gfp_t gfp_mask)
835 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
836 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
839 data = bnx2x_frag_alloc(fp, gfp_mask);
840 if (unlikely(data == NULL))
843 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
846 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
847 bnx2x_frag_free(fp, data);
848 BNX2X_ERR("Can't map rx data\n");
853 dma_unmap_addr_set(rx_buf, mapping, mapping);
855 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
856 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
862 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
863 struct bnx2x_fastpath *fp,
864 struct bnx2x_eth_q_stats *qstats)
866 /* Do nothing if no L4 csum validation was done.
867 * We do not check whether IP csum was validated. For IPv4 we assume
868 * that if the card got as far as validating the L4 csum, it also
869 * validated the IP csum. IPv6 has no IP csum.
871 if (cqe->fast_path_cqe.status_flags &
872 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
875 /* If L4 validation was done, check if an error was found. */
877 if (cqe->fast_path_cqe.type_error_flags &
878 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
879 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
880 qstats->hw_csum_err++;
882 skb->ip_summed = CHECKSUM_UNNECESSARY;
885 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
887 struct bnx2x *bp = fp->bp;
888 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
889 u16 sw_comp_cons, sw_comp_prod;
891 union eth_rx_cqe *cqe;
892 struct eth_fast_path_rx_cqe *cqe_fp;
894 #ifdef BNX2X_STOP_ON_ERROR
895 if (unlikely(bp->panic))
901 bd_cons = fp->rx_bd_cons;
902 bd_prod = fp->rx_bd_prod;
903 bd_prod_fw = bd_prod;
904 sw_comp_cons = fp->rx_comp_cons;
905 sw_comp_prod = fp->rx_comp_prod;
907 comp_ring_cons = RCQ_BD(sw_comp_cons);
908 cqe = &fp->rx_comp_ring[comp_ring_cons];
909 cqe_fp = &cqe->fast_path_cqe;
911 DP(NETIF_MSG_RX_STATUS,
912 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
914 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
915 struct sw_rx_bd *rx_buf = NULL;
918 enum eth_rx_cqe_type cqe_fp_type;
922 enum pkt_hash_types rxhash_type;
924 #ifdef BNX2X_STOP_ON_ERROR
925 if (unlikely(bp->panic))
929 bd_prod = RX_BD(bd_prod);
930 bd_cons = RX_BD(bd_cons);
932 /* A rmb() is required to ensure that the CQE is not read
933 * before it is written by the adapter DMA. PCI ordering
934 * rules will make sure the other fields are written before
935 * the marker at the end of struct eth_fast_path_rx_cqe
936 * but without rmb() a weakly ordered processor can process
937 * stale data. Without the barrier TPA state-machine might
938 * enter inconsistent state and kernel stack might be
939 * provided with incorrect packet description - these lead
940 * to various kernel crashes.
944 cqe_fp_flags = cqe_fp->type_error_flags;
945 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
947 DP(NETIF_MSG_RX_STATUS,
948 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
949 CQE_TYPE(cqe_fp_flags),
950 cqe_fp_flags, cqe_fp->status_flags,
951 le32_to_cpu(cqe_fp->rss_hash_result),
952 le16_to_cpu(cqe_fp->vlan_tag),
953 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
955 /* is this a slowpath msg? */
956 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
957 bnx2x_sp_event(fp, cqe);
961 rx_buf = &fp->rx_buf_ring[bd_cons];
964 if (!CQE_TYPE_FAST(cqe_fp_type)) {
965 struct bnx2x_agg_info *tpa_info;
966 u16 frag_size, pages;
967 #ifdef BNX2X_STOP_ON_ERROR
969 if (fp->mode == TPA_MODE_DISABLED &&
970 (CQE_TYPE_START(cqe_fp_type) ||
971 CQE_TYPE_STOP(cqe_fp_type)))
972 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
973 CQE_TYPE(cqe_fp_type));
976 if (CQE_TYPE_START(cqe_fp_type)) {
977 u16 queue = cqe_fp->queue_index;
978 DP(NETIF_MSG_RX_STATUS,
979 "calling tpa_start on queue %d\n",
982 bnx2x_tpa_start(fp, queue,
988 queue = cqe->end_agg_cqe.queue_index;
989 tpa_info = &fp->tpa_info[queue];
990 DP(NETIF_MSG_RX_STATUS,
991 "calling tpa_stop on queue %d\n",
994 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
997 if (fp->mode == TPA_MODE_GRO)
998 pages = (frag_size + tpa_info->full_page - 1) /
1001 pages = SGE_PAGE_ALIGN(frag_size) >>
1004 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
1005 &cqe->end_agg_cqe, comp_ring_cons);
1006 #ifdef BNX2X_STOP_ON_ERROR
1011 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1015 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1016 pad = cqe_fp->placement_offset;
1017 dma_sync_single_for_cpu(&bp->pdev->dev,
1018 dma_unmap_addr(rx_buf, mapping),
1019 pad + RX_COPY_THRESH,
1022 prefetch(data + pad); /* speedup eth_type_trans() */
1023 /* is this an error packet? */
1024 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1025 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1026 "ERROR flags %x rx packet %u\n",
1027 cqe_fp_flags, sw_comp_cons);
1028 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1032 /* Since we don't have a jumbo ring
1033 * copy small packets if mtu > 1500
1035 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1036 (len <= RX_COPY_THRESH)) {
1037 skb = napi_alloc_skb(&fp->napi, len);
1039 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1040 "ERROR packet dropped because of alloc failure\n");
1041 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1044 memcpy(skb->data, data + pad, len);
1045 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1047 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1048 GFP_ATOMIC) == 0)) {
1049 dma_unmap_single(&bp->pdev->dev,
1050 dma_unmap_addr(rx_buf, mapping),
1053 skb = build_skb(data, fp->rx_frag_size);
1054 if (unlikely(!skb)) {
1055 bnx2x_frag_free(fp, data);
1056 bnx2x_fp_qstats(bp, fp)->
1057 rx_skb_alloc_failed++;
1060 skb_reserve(skb, pad);
1062 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1063 "ERROR packet dropped because of alloc failure\n");
1064 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1066 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1072 skb->protocol = eth_type_trans(skb, bp->dev);
1074 /* Set Toeplitz hash for a non-LRO skb */
1075 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1076 skb_set_hash(skb, rxhash, rxhash_type);
1078 skb_checksum_none_assert(skb);
1080 if (bp->dev->features & NETIF_F_RXCSUM)
1081 bnx2x_csum_validate(skb, cqe, fp,
1082 bnx2x_fp_qstats(bp, fp));
1084 skb_record_rx_queue(skb, fp->rx_queue);
1086 /* Check if this packet was timestamped */
1087 if (unlikely(cqe->fast_path_cqe.type_error_flags &
1088 (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1089 bnx2x_set_rx_ts(bp, skb);
1091 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1093 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1094 le16_to_cpu(cqe_fp->vlan_tag));
1096 napi_gro_receive(&fp->napi, skb);
1098 rx_buf->data = NULL;
1100 bd_cons = NEXT_RX_IDX(bd_cons);
1101 bd_prod = NEXT_RX_IDX(bd_prod);
1102 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1105 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1106 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1108 /* mark CQE as free */
1109 BNX2X_SEED_CQE(cqe_fp);
1111 if (rx_pkt == budget)
1114 comp_ring_cons = RCQ_BD(sw_comp_cons);
1115 cqe = &fp->rx_comp_ring[comp_ring_cons];
1116 cqe_fp = &cqe->fast_path_cqe;
1119 fp->rx_bd_cons = bd_cons;
1120 fp->rx_bd_prod = bd_prod_fw;
1121 fp->rx_comp_cons = sw_comp_cons;
1122 fp->rx_comp_prod = sw_comp_prod;
1124 /* Update producers */
1125 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1131 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1133 struct bnx2x_fastpath *fp = fp_cookie;
1134 struct bnx2x *bp = fp->bp;
1138 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1139 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1141 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1143 #ifdef BNX2X_STOP_ON_ERROR
1144 if (unlikely(bp->panic))
1148 /* Handle Rx and Tx according to MSI-X vector */
1149 for_each_cos_in_tx_queue(fp, cos)
1150 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1152 prefetch(&fp->sb_running_index[SM_RX_ID]);
1153 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1158 /* HW Lock for shared dual port PHYs */
1159 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1161 mutex_lock(&bp->port.phy_mutex);
1163 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1166 void bnx2x_release_phy_lock(struct bnx2x *bp)
1168 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1170 mutex_unlock(&bp->port.phy_mutex);
1173 /* calculates MF speed according to current linespeed and MF configuration */
1174 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1176 u16 line_speed = bp->link_vars.line_speed;
1178 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1179 bp->mf_config[BP_VN(bp)]);
1181 /* Calculate the current MAX line speed limit for the MF devices
1184 if (IS_MF_PERCENT_BW(bp))
1185 line_speed = (line_speed * maxCfg) / 100;
1186 else { /* SD mode */
1187 u16 vn_max_rate = maxCfg * 100;
1189 if (vn_max_rate < line_speed)
1190 line_speed = vn_max_rate;
1198 * bnx2x_fill_report_data - fill link report data to report
1200 * @bp: driver handle
1201 * @data: link state to update
1203 * It uses non-atomic bit operations because it is called under the mutex.
1205 static void bnx2x_fill_report_data(struct bnx2x *bp,
1206 struct bnx2x_link_report_data *data)
1208 memset(data, 0, sizeof(*data));
1211 /* Fill the report data: effective line speed */
1212 data->line_speed = bnx2x_get_mf_speed(bp);
1215 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1216 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1217 &data->link_report_flags);
1219 if (!BNX2X_NUM_ETH_QUEUES(bp))
1220 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1221 &data->link_report_flags);
1224 if (bp->link_vars.duplex == DUPLEX_FULL)
1225 __set_bit(BNX2X_LINK_REPORT_FD,
1226 &data->link_report_flags);
1228 /* Rx Flow Control is ON */
1229 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1230 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1231 &data->link_report_flags);
1233 /* Tx Flow Control is ON */
1234 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1235 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1236 &data->link_report_flags);
1238 *data = bp->vf_link_vars;
1243 * bnx2x_link_report - report link status to OS.
1245 * @bp: driver handle
1247 * Calls the __bnx2x_link_report() under the same locking scheme
1248 * as the link/PHY state managing code to ensure consistent link reporting.
1252 void bnx2x_link_report(struct bnx2x *bp)
1254 bnx2x_acquire_phy_lock(bp);
1255 __bnx2x_link_report(bp);
1256 bnx2x_release_phy_lock(bp);
1260 * __bnx2x_link_report - report link status to OS.
1262 * @bp: driver handle
1264 * Non-atomic implementation.
1265 * Should be called under the phy_lock.
1267 void __bnx2x_link_report(struct bnx2x *bp)
1269 struct bnx2x_link_report_data cur_data;
1271 if (bp->force_link_down) {
1272 bp->link_vars.link_up = 0;
1277 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1278 bnx2x_read_mf_cfg(bp);
1280 /* Read the current link report info */
1281 bnx2x_fill_report_data(bp, &cur_data);
1283 /* Don't report link down or exactly the same link status twice */
1284 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1285 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1286 &bp->last_reported_link.link_report_flags) &&
1287 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1288 &cur_data.link_report_flags)))
1293 /* We are going to report new link parameters now -
1294 * remember the current data for the next time.
1296 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1298 /* propagate status to VFs */
1300 bnx2x_iov_link_update(bp);
1302 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1303 &cur_data.link_report_flags)) {
1304 netif_carrier_off(bp->dev);
1305 netdev_err(bp->dev, "NIC Link is Down\n");
1311 netif_carrier_on(bp->dev);
1313 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1314 &cur_data.link_report_flags))
1319 /* Handle the FC at the end so that only these flags would be
1320 * possibly set. This way we may easily check if there is no FC enabled.
1323 if (cur_data.link_report_flags) {
1324 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1325 &cur_data.link_report_flags)) {
1326 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1327 &cur_data.link_report_flags))
1328 flow = "ON - receive & transmit";
1330 flow = "ON - receive";
1332 flow = "ON - transmit";
1337 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1338 cur_data.line_speed, duplex, flow);
1342 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1346 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1347 struct eth_rx_sge *sge;
1349 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1351 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1352 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1355 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1356 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1360 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1361 struct bnx2x_fastpath *fp, int last)
1365 for (i = 0; i < last; i++) {
1366 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1367 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1368 u8 *data = first_buf->data;
1371 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1374 if (tpa_info->tpa_state == BNX2X_TPA_START)
1375 dma_unmap_single(&bp->pdev->dev,
1376 dma_unmap_addr(first_buf, mapping),
1377 fp->rx_buf_size, DMA_FROM_DEVICE);
1378 bnx2x_frag_free(fp, data);
1379 first_buf->data = NULL;
1383 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1387 for_each_rx_queue_cnic(bp, j) {
1388 struct bnx2x_fastpath *fp = &bp->fp[j];
1392 /* Activate BD ring */
1394 * this will generate an interrupt (to the TSTORM)
1395 * must only be done after chip is initialized
1397 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1402 void bnx2x_init_rx_rings(struct bnx2x *bp)
1404 int func = BP_FUNC(bp);
1408 /* Allocate TPA resources */
1409 for_each_eth_queue(bp, j) {
1410 struct bnx2x_fastpath *fp = &bp->fp[j];
1413 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1415 if (fp->mode != TPA_MODE_DISABLED) {
1416 /* Fill the per-aggregation pool */
1417 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1418 struct bnx2x_agg_info *tpa_info =
1420 struct sw_rx_bd *first_buf =
1421 &tpa_info->first_buf;
1424 bnx2x_frag_alloc(fp, GFP_KERNEL);
1425 if (!first_buf->data) {
1426 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1428 bnx2x_free_tpa_pool(bp, fp, i);
1429 fp->mode = TPA_MODE_DISABLED;
1432 dma_unmap_addr_set(first_buf, mapping, 0);
1433 tpa_info->tpa_state = BNX2X_TPA_STOP;
1436 /* "next page" elements initialization */
1437 bnx2x_set_next_page_sgl(fp);
1439 /* set SGEs bit mask */
1440 bnx2x_init_sge_ring_bit_mask(fp);
1442 /* Allocate SGEs and initialize the ring elements */
1443 for (i = 0, ring_prod = 0;
1444 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1446 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1448 BNX2X_ERR("was only able to allocate %d rx sges\n",
1450 BNX2X_ERR("disabling TPA for queue[%d]\n",
1452 /* Cleanup already allocated elements */
1453 bnx2x_free_rx_sge_range(bp, fp,
1455 bnx2x_free_tpa_pool(bp, fp,
1457 fp->mode = TPA_MODE_DISABLED;
1461 ring_prod = NEXT_SGE_IDX(ring_prod);
1464 fp->rx_sge_prod = ring_prod;
1468 for_each_eth_queue(bp, j) {
1469 struct bnx2x_fastpath *fp = &bp->fp[j];
1473 /* Activate BD ring */
1475 * this will generate an interrupt (to the TSTORM)
1476 * must only be done after chip is initialized
1478 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1484 if (CHIP_IS_E1(bp)) {
1485 REG_WR(bp, BAR_USTRORM_INTMEM +
1486 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1487 U64_LO(fp->rx_comp_mapping));
1488 REG_WR(bp, BAR_USTRORM_INTMEM +
1489 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1490 U64_HI(fp->rx_comp_mapping));
1495 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1498 struct bnx2x *bp = fp->bp;
1500 for_each_cos_in_tx_queue(fp, cos) {
1501 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1502 unsigned pkts_compl = 0, bytes_compl = 0;
1504 u16 sw_prod = txdata->tx_pkt_prod;
1505 u16 sw_cons = txdata->tx_pkt_cons;
1507 while (sw_cons != sw_prod) {
1508 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1509 &pkts_compl, &bytes_compl);
1513 netdev_tx_reset_queue(
1514 netdev_get_tx_queue(bp->dev,
1515 txdata->txq_index));
1519 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1523 for_each_tx_queue_cnic(bp, i) {
1524 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1528 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1532 for_each_eth_queue(bp, i) {
1533 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1537 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1539 struct bnx2x *bp = fp->bp;
1542 /* ring wasn't allocated */
1543 if (fp->rx_buf_ring == NULL)
1546 for (i = 0; i < NUM_RX_BD; i++) {
1547 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1548 u8 *data = rx_buf->data;
1552 dma_unmap_single(&bp->pdev->dev,
1553 dma_unmap_addr(rx_buf, mapping),
1554 fp->rx_buf_size, DMA_FROM_DEVICE);
1556 rx_buf->data = NULL;
1557 bnx2x_frag_free(fp, data);
1561 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1565 for_each_rx_queue_cnic(bp, j) {
1566 bnx2x_free_rx_bds(&bp->fp[j]);
1570 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1574 for_each_eth_queue(bp, j) {
1575 struct bnx2x_fastpath *fp = &bp->fp[j];
1577 bnx2x_free_rx_bds(fp);
1579 if (fp->mode != TPA_MODE_DISABLED)
1580 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1584 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1586 bnx2x_free_tx_skbs_cnic(bp);
1587 bnx2x_free_rx_skbs_cnic(bp);
1590 void bnx2x_free_skbs(struct bnx2x *bp)
1592 bnx2x_free_tx_skbs(bp);
1593 bnx2x_free_rx_skbs(bp);
1596 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1598 /* load old values */
1599 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1601 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1602 /* leave all but MAX value */
1603 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1605 /* set new MAX value */
1606 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1607 & FUNC_MF_CFG_MAX_BW_MASK;
1609 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1614 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1616 * @bp: driver handle
1617 * @nvecs: number of vectors to be released
1619 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1623 if (nvecs == offset)
1626 /* VFs don't have a default SB */
1628 free_irq(bp->msix_table[offset].vector, bp->dev);
1629 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1630 bp->msix_table[offset].vector);
1634 if (CNIC_SUPPORT(bp)) {
1635 if (nvecs == offset)
1640 for_each_eth_queue(bp, i) {
1641 if (nvecs == offset)
1643 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1644 i, bp->msix_table[offset].vector);
1646 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1650 void bnx2x_free_irq(struct bnx2x *bp)
1652 if (bp->flags & USING_MSIX_FLAG &&
1653 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1654 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1656 /* vfs don't have a default status block */
1660 bnx2x_free_msix_irqs(bp, nvecs);
1662 free_irq(bp->dev->irq, bp->dev);
1666 int bnx2x_enable_msix(struct bnx2x *bp)
1668 int msix_vec = 0, i, rc;
1670 /* VFs don't have a default status block */
1672 bp->msix_table[msix_vec].entry = msix_vec;
1673 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1674 bp->msix_table[0].entry);
1678 /* Cnic requires an msix vector for itself */
1679 if (CNIC_SUPPORT(bp)) {
1680 bp->msix_table[msix_vec].entry = msix_vec;
1681 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1682 msix_vec, bp->msix_table[msix_vec].entry);
1686 /* We need separate vectors for ETH queues only (not FCoE) */
1687 for_each_eth_queue(bp, i) {
1688 bp->msix_table[msix_vec].entry = msix_vec;
1689 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1690 msix_vec, msix_vec, i);
1694 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1697 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1698 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1700 * reconfigure number of tx/rx queues according to available MSI-X vectors
1703 if (rc == -ENOSPC) {
1704 /* Get by with single vector */
1705 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1707 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1712 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1713 bp->flags |= USING_SINGLE_MSIX_FLAG;
1715 BNX2X_DEV_INFO("set number of queues to 1\n");
1716 bp->num_ethernet_queues = 1;
1717 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1718 } else if (rc < 0) {
1719 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1721 } else if (rc < msix_vec) {
1722 /* how many fewer vectors will we have? */
1723 int diff = msix_vec - rc;
1725 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1728 * decrease number of queues by number of unallocated entries
1730 bp->num_ethernet_queues -= diff;
1731 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1733 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1737 bp->flags |= USING_MSIX_FLAG;
1742 /* fall to INTx if not enough memory */
1744 bp->flags |= DISABLE_MSI_FLAG;
1749 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1751 int i, rc, offset = 0;
1753 /* no default status block for vf */
1755 rc = request_irq(bp->msix_table[offset++].vector,
1756 bnx2x_msix_sp_int, 0,
1757 bp->dev->name, bp->dev);
1759 BNX2X_ERR("request sp irq failed\n");
1764 if (CNIC_SUPPORT(bp))
1767 for_each_eth_queue(bp, i) {
1768 struct bnx2x_fastpath *fp = &bp->fp[i];
1769 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1772 rc = request_irq(bp->msix_table[offset].vector,
1773 bnx2x_msix_fp_int, 0, fp->name, fp);
1775 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1776 bp->msix_table[offset].vector, rc);
1777 bnx2x_free_msix_irqs(bp, offset);
1784 i = BNX2X_NUM_ETH_QUEUES(bp);
1786 offset = 1 + CNIC_SUPPORT(bp);
1787 netdev_info(bp->dev,
1788 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1789 bp->msix_table[0].vector,
1790 0, bp->msix_table[offset].vector,
1791 i - 1, bp->msix_table[offset + i - 1].vector);
1793 offset = CNIC_SUPPORT(bp);
1794 netdev_info(bp->dev,
1795 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1796 0, bp->msix_table[offset].vector,
1797 i - 1, bp->msix_table[offset + i - 1].vector);
1802 int bnx2x_enable_msi(struct bnx2x *bp)
1806 rc = pci_enable_msi(bp->pdev);
1808 BNX2X_DEV_INFO("MSI is not attainable\n");
1811 bp->flags |= USING_MSI_FLAG;
1816 static int bnx2x_req_irq(struct bnx2x *bp)
1818 unsigned long flags;
1821 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1824 flags = IRQF_SHARED;
1826 if (bp->flags & USING_MSIX_FLAG)
1827 irq = bp->msix_table[0].vector;
1829 irq = bp->pdev->irq;
1831 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1834 static int bnx2x_setup_irqs(struct bnx2x *bp)
1837 if (bp->flags & USING_MSIX_FLAG &&
1838 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1839 rc = bnx2x_req_msix_irqs(bp);
1843 rc = bnx2x_req_irq(bp);
1845 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1848 if (bp->flags & USING_MSI_FLAG) {
1849 bp->dev->irq = bp->pdev->irq;
1850 netdev_info(bp->dev, "using MSI IRQ %d\n",
1853 if (bp->flags & USING_MSIX_FLAG) {
1854 bp->dev->irq = bp->msix_table[0].vector;
1855 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1863 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1867 for_each_rx_queue_cnic(bp, i) {
1868 napi_enable(&bnx2x_fp(bp, i, napi));
1872 static void bnx2x_napi_enable(struct bnx2x *bp)
1876 for_each_eth_queue(bp, i) {
1877 napi_enable(&bnx2x_fp(bp, i, napi));
1881 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1885 for_each_rx_queue_cnic(bp, i) {
1886 napi_disable(&bnx2x_fp(bp, i, napi));
1890 static void bnx2x_napi_disable(struct bnx2x *bp)
1894 for_each_eth_queue(bp, i) {
1895 napi_disable(&bnx2x_fp(bp, i, napi));
1899 void bnx2x_netif_start(struct bnx2x *bp)
1901 if (netif_running(bp->dev)) {
1902 bnx2x_napi_enable(bp);
1903 if (CNIC_LOADED(bp))
1904 bnx2x_napi_enable_cnic(bp);
1905 bnx2x_int_enable(bp);
1906 if (bp->state == BNX2X_STATE_OPEN)
1907 netif_tx_wake_all_queues(bp->dev);
1911 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1913 bnx2x_int_disable_sync(bp, disable_hw);
1914 bnx2x_napi_disable(bp);
1915 if (CNIC_LOADED(bp))
1916 bnx2x_napi_disable_cnic(bp);
1919 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1920 void *accel_priv, select_queue_fallback_t fallback)
1922 struct bnx2x *bp = netdev_priv(dev);
1924 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1925 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1926 u16 ether_type = ntohs(hdr->h_proto);
1928 /* Skip VLAN tag if present */
1929 if (ether_type == ETH_P_8021Q) {
1930 struct vlan_ethhdr *vhdr =
1931 (struct vlan_ethhdr *)skb->data;
1933 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1936 /* If ethertype is FCoE or FIP - use FCoE ring */
1937 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1938 return bnx2x_fcoe_tx(bp, txq_index);
1941 /* select a non-FCoE queue */
1942 return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp));
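/* Illustrative sketch (not driver code): the VLAN-aware ethertype peek that
 * bnx2x_select_queue() performs above, as a stand-alone helper. Like the
 * driver, it assumes the Ethernet (and possible VLAN) header is linear at
 * skb->data.
 */
static __be16 example_outer_ethertype(const struct sk_buff *skb)
{
	const struct ethhdr *hdr = (const struct ethhdr *)skb->data;
	__be16 proto = hdr->h_proto;

	/* Skip a single 802.1Q tag to reach the encapsulated protocol */
	if (proto == htons(ETH_P_8021Q)) {
		const struct vlan_ethhdr *vhdr =
			(const struct vlan_ethhdr *)skb->data;

		proto = vhdr->h_vlan_encapsulated_proto;
	}
	return proto;
}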
1945 void bnx2x_set_num_queues(struct bnx2x *bp)
1948 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1950 /* override in STORAGE SD modes */
1951 if (IS_MF_STORAGE_ONLY(bp))
1952 bp->num_ethernet_queues = 1;
1954 /* Add special queues */
1955 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1956 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1958 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1962 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1964 * @bp: Driver handle
1966 * We currently support at most 16 Tx queues for each CoS, thus we will
1967 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1970 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1971 * index after all ETH L2 indices.
1973 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1974 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1975 * 16..31,...) with indices that are not coupled with any real Tx queue.
1977 * The proper configuration of skb->queue_mapping is handled by
1978 * bnx2x_select_queue() and __skb_tx_hash().
1980 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1981 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1983 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1987 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1988 rx = BNX2X_NUM_ETH_QUEUES(bp);
1990 /* account for fcoe queue */
1991 if (include_cnic && !NO_FCOE(bp)) {
1996 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1998 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
2001 rc = netif_set_real_num_rx_queues(bp->dev, rx);
2003 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2007 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2013 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2017 for_each_queue(bp, i) {
2018 struct bnx2x_fastpath *fp = &bp->fp[i];
2021 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2024 * Although there are no IP frames expected to arrive to
2025 * this ring we still want to add an
2026 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer overrun
2029 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2032 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2033 IP_HEADER_ALIGNMENT_PADDING +
2036 BNX2X_FW_RX_ALIGN_END;
2037 fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
2038 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
2039 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2040 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2042 fp->rx_frag_size = 0;
2046 static int bnx2x_init_rss(struct bnx2x *bp)
2049 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2051 /* Prepare the initial contents for the indirection table if RSS is enabled.
2054 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2055 bp->rss_conf_obj.ind_table[i] =
2057 ethtool_rxfh_indir_default(i, num_eth_queues);
2060 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2061 * per-port, so if explicit configuration is needed, do it only for a PMF.
2064 * For 57712 and newer on the other hand it's a per-function
2067 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
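/* Illustrative sketch (not driver code): what the default indirection-table
 * fill above produces. ethtool_rxfh_indir_default(i, n) is simply i % n, so
 * table slots are spread round-robin across the RSS queues.
 */
static void example_fill_indir(u8 *table, size_t table_sz, u8 num_queues)
{
	size_t i;

	for (i = 0; i < table_sz; i++)
		table[i] = i % num_queues; /* == ethtool_rxfh_indir_default() */
}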
2070 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2071 bool config_hash, bool enable)
2073 struct bnx2x_config_rss_params params = {NULL};
2075 /* Although RSS is meaningless when there is a single HW queue we
2076 * still need it enabled in order to have HW Rx hash generated.
2078 * if (!is_eth_multi(bp))
2079 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2082 params.rss_obj = rss_obj;
2084 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
2087 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
2089 /* RSS configuration */
2090 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
2091 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
2092 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
2093 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
2094 if (rss_obj->udp_rss_v4)
2095 __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
2096 if (rss_obj->udp_rss_v6)
2097 __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
2099 if (!CHIP_IS_E1x(bp)) {
2100 /* valid only for TUNN_MODE_VXLAN tunnel mode */
2101 __set_bit(BNX2X_RSS_IPV4_VXLAN, ¶ms.rss_flags);
2102 __set_bit(BNX2X_RSS_IPV6_VXLAN, ¶ms.rss_flags);
2104 /* valid only for TUNN_MODE_GRE tunnel mode */
2105 __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, ¶ms.rss_flags);
2108 __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags);
2112 params.rss_result_mask = MULTI_MASK;
2114 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2118 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2119 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
2123 return bnx2x_config_rss(bp, ¶ms);
2125 return bnx2x_vfpf_config_rss(bp, ¶ms);
2128 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2130 struct bnx2x_func_state_params func_params = {NULL};
2132 /* Prepare parameters for function state transitions */
2133 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2135 func_params.f_obj = &bp->func_obj;
2136 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2138 func_params.params.hw_init.load_phase = load_code;
2140 return bnx2x_func_state_change(bp, &func_params);
2144 * Cleans the objects that have internal lists without sending
2145 * ramrods. Should be run when interrupts are disabled.
2147 void bnx2x_squeeze_objects(struct bnx2x *bp)
2150 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2151 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2152 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2154 /***************** Cleanup MACs' object first *************************/
2156 /* Wait for completion of requested commands */
2157 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2158 /* Perform a dry cleanup */
2159 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2161 /* Clean ETH primary MAC */
2162 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2163 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2166 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2168 /* Cleanup UC list */
2170 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2171 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2174 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2176 /***************** Now clean mcast object *****************************/
2177 rparam.mcast_obj = &bp->mcast_obj;
2178 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2180 /* Add a DEL command... - Since we're doing a driver cleanup only,
2181 * we take a lock surrounding both the initial send and the CONTs,
2182 * as we don't want a true completion to disrupt us in the middle.
2184 netif_addr_lock_bh(bp->dev);
2185 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2187 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2190 /* ...and wait until all pending commands are cleared */
2191 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2194 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2196 netif_addr_unlock_bh(bp->dev);
2200 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2202 netif_addr_unlock_bh(bp->dev);
2205 #ifndef BNX2X_STOP_ON_ERROR
2206 #define LOAD_ERROR_EXIT(bp, label) \
2208 (bp)->state = BNX2X_STATE_ERROR; \
2212 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2214 bp->cnic_loaded = false; \
2217 #else /*BNX2X_STOP_ON_ERROR*/
2218 #define LOAD_ERROR_EXIT(bp, label) \
2220 (bp)->state = BNX2X_STATE_ERROR; \
2224 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2226 bp->cnic_loaded = false; \
2230 #endif /*BNX2X_STOP_ON_ERROR*/
2232 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2234 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2235 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2239 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2241 int num_groups, vf_headroom = 0;
2242 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2244 /* number of queues for statistics is number of eth queues + FCoE */
2245 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2247 /* Total number of FW statistics requests =
2248 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2249 * and fcoe l2 queue) stats + num of queues (which includes another 1
2250 * for fcoe l2 queue if applicable)
2252 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2254 /* vf stats appear in the request list, but their data is allocated by
2255 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2256 * it is used to determine where to place the vf stats queries in the request struct.
2260 vf_headroom = bnx2x_vf_headroom(bp);
2262 /* Request is built from stats_query_header and an array of
2263 * stats_query_cmd_group each of which contains
2264 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2265 * configured in the stats_query_header.
2268 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2269 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2272 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2273 bp->fw_stats_num, vf_headroom, num_groups);
2274 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2275 num_groups * sizeof(struct stats_query_cmd_group);
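/* Worked example for the group count above (illustrative numbers only -
 * assuming STATS_QUERY_CMD_COUNT were 16): with fw_stats_num + vf_headroom
 * = 18 requests, num_groups = 18/16 + (18 % 16 ? 1 : 0) = 1 + 1 = 2, i.e.
 * the expression is a longhand ceiling division.
 */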
2277 /* Data for statistics requests + stats_counter
2278 * stats_counter holds per-STORM counters that are incremented
2279 * when STORM has finished with the current request.
2280 * memory for FCoE offloaded statistics are counted anyway,
2281 * even if they will not be sent.
2282 * VF stats are not accounted for here as the data of VF stats is stored
2283 * in memory allocated by the VF, not here.
2285 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2286 sizeof(struct per_pf_stats) +
2287 sizeof(struct fcoe_statistics_params) +
2288 sizeof(struct per_queue_stats) * num_queue_stats +
2289 sizeof(struct stats_counter);
2291 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2292 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2293 if (!bp->fw_stats)
2294 goto alloc_mem_err;
2296 /* Set shortcuts */
2297 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2298 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2299 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2300 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2301 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2302 bp->fw_stats_req_sz;
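/* Layout of the single DMA buffer carved up above (request part first,
 * data part right behind it):
 * [stats_query_header | num_groups * stats_query_cmd_group] <- fw_stats_req
 * [per_port | per_pf | fcoe | per_queue * N | stats_counter] <- fw_stats_data
 * hence fw_stats_data_mapping = fw_stats_mapping + fw_stats_req_sz.
 */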
2304 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2305 U64_HI(bp->fw_stats_req_mapping),
2306 U64_LO(bp->fw_stats_req_mapping));
2307 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2308 U64_HI(bp->fw_stats_data_mapping),
2309 U64_LO(bp->fw_stats_data_mapping));
2311 return 0;
2312 alloc_mem_err:
2313 bnx2x_free_fw_stats_mem(bp);
2314 BNX2X_ERR("Can't allocate FW stats memory\n");
2315 return -ENOMEM;
2316 }
2318 /* send load request to mcp and analyze response */
2319 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2320 {
2321 u32 param;
2323 /* init fw_seq */
2324 bp->fw_seq =
2325 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2326 DRV_MSG_SEQ_NUMBER_MASK);
2327 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2329 /* Get current FW pulse sequence */
2330 bp->fw_drv_pulse_wr_seq =
2331 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2332 DRV_PULSE_SEQ_MASK);
2333 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2335 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2337 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2338 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2341 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2343 /* if mcp fails to respond we must abort */
2344 if (!(*load_code)) {
2345 BNX2X_ERR("MCP response failure, aborting\n");
2349 /* If mcp refused (e.g. other port is in diagnostic mode) we
2352 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2353 BNX2X_ERR("MCP refused load request, aborting\n");
2359 /* check whether another PF has already loaded FW to chip. In
2360 * virtualized environments a pf from another VM may have already
2361 * initialized the device including loading FW
2362 */
2363 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2364 {
2365 /* is another pf loaded on this engine? */
2366 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2367 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2368 /* build my FW version dword */
2371 /* read loaded FW from chip */
2372 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2374 u32 my_fw = ~loaded_fw;
2376 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2377 loaded_fw, my_fw);
2379 /* abort nic load if version mismatch */
2380 if (my_fw != loaded_fw) {
2382 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2385 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2393 /* returns the "mcp load_code" according to global load_count array */
2394 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2396 int path = BP_PATH(bp);
2398 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2399 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2400 bnx2x_load_count[path][2]);
2401 bnx2x_load_count[path][0]++;
2402 bnx2x_load_count[path][1 + port]++;
2403 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2404 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2405 bnx2x_load_count[path][2]);
2406 if (bnx2x_load_count[path][0] == 1)
2407 return FW_MSG_CODE_DRV_LOAD_COMMON;
2408 else if (bnx2x_load_count[path][1 + port] == 1)
2409 return FW_MSG_CODE_DRV_LOAD_PORT;
2411 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
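/* Counter semantics, by example: the first function loading on a path sees
 * load_count[path][0] == 1 and gets COMMON (global init); the first one on
 * its port gets PORT; every later function gets FUNCTION-level init only.
 * The matching unload path decrements these counters again.
 */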
2414 /* mark PMF if applicable */
2415 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2417 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2418 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2419 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2420 bp->port.pmf = 1;
2421 /* We need the barrier to ensure the ordering between the
2422 * writing to bp->port.pmf here and reading it from the
2423 * bnx2x_periodic_task().
2424 */
2425 smp_mb();
2426 } else {
2427 bp->port.pmf = 0;
2428 }
2430 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2431 }
2433 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2435 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2436 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2437 (bp->common.shmem2_base)) {
2438 if (SHMEM2_HAS(bp, dcc_support))
2439 SHMEM2_WR(bp, dcc_support,
2440 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2441 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2442 if (SHMEM2_HAS(bp, afex_driver_support))
2443 SHMEM2_WR(bp, afex_driver_support,
2444 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2447 /* Set AFEX default VLAN tag to an invalid value */
2448 bp->afex_def_vlan_tag = -1;
2452 * bnx2x_bz_fp - zero content of the fastpath structure.
2454 * @bp: driver handle
2455 * @index: fastpath index to be zeroed
2457 * Makes sure the contents of the bp->fp[index].napi is kept
2460 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2461 {
2462 struct bnx2x_fastpath *fp = &bp->fp[index];
2463 int cos;
2464 struct napi_struct orig_napi = fp->napi;
2465 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2467 /* bzero bnx2x_fastpath contents */
2468 if (fp->tpa_info)
2469 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2470 sizeof(struct bnx2x_agg_info));
2471 memset(fp, 0, sizeof(*fp));
2473 /* Restore the NAPI object as it has been already initialized */
2474 fp->napi = orig_napi;
2475 fp->tpa_info = orig_tpa_info;
2476 fp->bp = bp;
2477 fp->index = index;
2478 if (IS_ETH_FP(fp))
2479 fp->max_cos = bp->max_cos;
2480 else
2481 /* Special queues support only one CoS */
2482 fp->max_cos = 1;
2484 /* Init txdata pointers */
2485 if (IS_FCOE_FP(fp))
2486 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2487 if (IS_ETH_FP(fp))
2488 for_each_cos_in_tx_queue(fp, cos)
2489 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2490 BNX2X_NUM_ETH_QUEUES(bp) + index];
2492 /* set the tpa flag for each queue. The tpa flag determines the queue
2493 * minimal size so it must be set prior to queue memory allocation
2495 if (bp->dev->features & NETIF_F_LRO)
2496 fp->mode = TPA_MODE_LRO;
2497 else if (bp->dev->features & NETIF_F_GRO &&
2498 bnx2x_mtu_allows_gro(bp->dev->mtu))
2499 fp->mode = TPA_MODE_GRO;
2501 fp->mode = TPA_MODE_DISABLED;
2503 /* We don't want TPA if it's disabled in bp
2504 * or if this is an FCoE L2 ring.
2506 if (bp->disable_tpa || IS_FCOE_FP(fp))
2507 fp->mode = TPA_MODE_DISABLED;
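/* Net effect of the selection above: LRO wins over GRO when both features
 * are set, GRO additionally requires an MTU that fits the aggregation
 * buffer (bnx2x_mtu_allows_gro()), and TPA is forced off for FCoE rings or
 * when globally disabled via bp->disable_tpa.
 */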
2510 void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2511 {
2512 u32 cur;
2514 if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2515 return;
2517 cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2518 DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2519 cur, state);
2521 SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2522 }
2524 int bnx2x_load_cnic(struct bnx2x *bp)
2526 int i, rc, port = BP_PORT(bp);
2528 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2530 mutex_init(&bp->cnic_mutex);
2533 rc = bnx2x_alloc_mem_cnic(bp);
2534 if (rc) {
2535 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2536 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2537 }
2540 rc = bnx2x_alloc_fp_mem_cnic(bp);
2541 if (rc) {
2542 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2543 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2544 }
2546 /* Update the number of queues with the cnic queues */
2547 rc = bnx2x_set_real_num_queues(bp, 1);
2548 if (rc) {
2549 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2550 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2551 }
2553 /* Add all CNIC NAPI objects */
2554 bnx2x_add_all_napi_cnic(bp);
2555 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2556 bnx2x_napi_enable_cnic(bp);
2558 rc = bnx2x_init_hw_func_cnic(bp);
2559 if (rc)
2560 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2562 bnx2x_nic_init_cnic(bp);
2565 /* Enable Timer scan */
2566 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2568 /* setup cnic queues */
2569 for_each_cnic_queue(bp, i) {
2570 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2571 if (rc) {
2572 BNX2X_ERR("Queue setup failed\n");
2573 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2574 }
2575 }
2578 /* Initialize Rx filter. */
2579 bnx2x_set_rx_mode_inner(bp);
2581 /* re-read iscsi info */
2582 bnx2x_get_iscsi_info(bp);
2583 bnx2x_setup_cnic_irq_info(bp);
2584 bnx2x_setup_cnic_info(bp);
2585 bp->cnic_loaded = true;
2586 if (bp->state == BNX2X_STATE_OPEN)
2587 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2589 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2593 #ifndef BNX2X_STOP_ON_ERROR
2594 load_error_cnic2:
2595 /* Disable Timer scan */
2596 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2598 load_error_cnic1:
2599 bnx2x_napi_disable_cnic(bp);
2600 /* Update the number of queues without the cnic queues */
2601 if (bnx2x_set_real_num_queues(bp, 0))
2602 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2603 load_error_cnic0:
2604 BNX2X_ERR("CNIC-related load failed\n");
2605 bnx2x_free_fp_mem_cnic(bp);
2606 bnx2x_free_mem_cnic(bp);
2607 return rc;
2608 #endif /* ! BNX2X_STOP_ON_ERROR */
2609 }
2611 /* must be called with rtnl_lock */
2612 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2614 int port = BP_PORT(bp);
2615 int i, rc = 0, load_code = 0;
2617 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2619 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2621 #ifdef BNX2X_STOP_ON_ERROR
2622 if (unlikely(bp->panic)) {
2623 BNX2X_ERR("Can't load NIC when there is panic\n");
2628 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2630 /* zero the structure w/o any lock, before SP handler is initialized */
2631 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2632 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2633 &bp->last_reported_link.link_report_flags);
2636 /* must be called before memory allocation and HW init */
2637 bnx2x_ilt_set_info(bp);
2640 * Zero fastpath structures preserving invariants like napi, which are
2641 * allocated only once, fp index, max_cos, bp pointer.
2642 * Also set fp->mode and txdata_ptr.
2644 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2645 for_each_queue(bp, i)
2646 bnx2x_bz_fp(bp, i);
2647 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2648 bp->num_cnic_queues) *
2649 sizeof(struct bnx2x_fp_txdata));
2651 bp->fcoe_init = false;
2653 /* Set the receive queues buffer size */
2654 bnx2x_set_rx_buf_size(bp);
2656 if (IS_PF(bp)) {
2657 rc = bnx2x_alloc_mem(bp);
2658 if (rc) {
2659 BNX2X_ERR("Unable to allocate bp memory\n");
2660 return rc;
2661 }
2662 }
2664 /* need to be done after alloc mem, since it's self adjusting to amount
2665 * of memory available for RSS queues
2667 rc = bnx2x_alloc_fp_mem(bp);
2668 if (rc) {
2669 BNX2X_ERR("Unable to allocate memory for fps\n");
2670 LOAD_ERROR_EXIT(bp, load_error0);
2671 }
2673 /* Allocate memory for FW statistics */
2674 rc = bnx2x_alloc_fw_stats_mem(bp);
2675 if (rc)
2676 LOAD_ERROR_EXIT(bp, load_error0);
2678 /* request pf to initialize status blocks */
2679 if (IS_VF(bp)) {
2680 rc = bnx2x_vfpf_init(bp);
2681 if (rc)
2682 LOAD_ERROR_EXIT(bp, load_error0);
2683 }
2685 /* As long as bnx2x_alloc_mem() may possibly update
2686 * bp->num_queues, bnx2x_set_real_num_queues() should always
2687 * come after it. At this stage cnic queues are not counted.
2689 rc = bnx2x_set_real_num_queues(bp, 0);
2690 if (rc) {
2691 BNX2X_ERR("Unable to set real_num_queues\n");
2692 LOAD_ERROR_EXIT(bp, load_error0);
2693 }
2695 /* configure multi cos mappings in kernel.
2696 * this configuration may be overridden by a multi class queue
2697 * discipline or by a dcbx negotiation result.
2699 bnx2x_setup_tc(bp->dev, bp->max_cos);
2701 /* Add all NAPI objects */
2702 bnx2x_add_all_napi(bp);
2703 DP(NETIF_MSG_IFUP, "napi added\n");
2704 bnx2x_napi_enable(bp);
2707 /* set pf load just before approaching the MCP */
2708 bnx2x_set_pf_load(bp);
2710 /* if mcp exists send load request and analyze response */
2711 if (!BP_NOMCP(bp)) {
2712 /* attempt to load pf */
2713 rc = bnx2x_nic_load_request(bp, &load_code);
2715 LOAD_ERROR_EXIT(bp, load_error1);
2717 /* what did mcp say? */
2718 rc = bnx2x_compare_fw_ver(bp, load_code, true);
2720 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2721 LOAD_ERROR_EXIT(bp, load_error2);
2724 load_code = bnx2x_nic_load_no_mcp(bp, port);
2727 /* mark pmf if applicable */
2728 bnx2x_nic_load_pmf(bp, load_code);
2730 /* Init Function state controlling object */
2731 bnx2x__init_func_obj(bp);
2734 rc = bnx2x_init_hw(bp, load_code);
2735 if (rc) {
2736 BNX2X_ERR("HW init failed, aborting\n");
2737 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2738 LOAD_ERROR_EXIT(bp, load_error2);
2739 }
2742 bnx2x_pre_irq_nic_init(bp);
2744 /* Connect to IRQs */
2745 rc = bnx2x_setup_irqs(bp);
2746 if (rc) {
2747 BNX2X_ERR("setup irqs failed\n");
2748 if (IS_PF(bp))
2749 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2750 LOAD_ERROR_EXIT(bp, load_error2);
2751 }
2753 /* Init per-function objects */
2755 /* Setup NIC internals and enable interrupts */
2756 bnx2x_post_irq_nic_init(bp, load_code);
2758 bnx2x_init_bp_objs(bp);
2759 bnx2x_iov_nic_init(bp);
2761 /* Set AFEX default VLAN tag to an invalid value */
2762 bp->afex_def_vlan_tag = -1;
2763 bnx2x_nic_load_afex_dcc(bp, load_code);
2764 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2765 rc = bnx2x_func_start(bp);
2766 if (rc) {
2767 BNX2X_ERR("Function start failed!\n");
2768 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2770 LOAD_ERROR_EXIT(bp, load_error3);
2771 }
2773 /* Send LOAD_DONE command to MCP */
2774 if (!BP_NOMCP(bp)) {
2775 load_code = bnx2x_fw_command(bp,
2776 DRV_MSG_CODE_LOAD_DONE, 0);
2777 if (!load_code) {
2778 BNX2X_ERR("MCP response failure, aborting\n");
2779 rc = -EBUSY;
2780 LOAD_ERROR_EXIT(bp, load_error3);
2781 }
2782 }
2784 /* initialize FW coalescing state machines in RAM */
2785 bnx2x_update_coalesce(bp);
2788 /* setup the leading queue */
2789 rc = bnx2x_setup_leading(bp);
2790 if (rc) {
2791 BNX2X_ERR("Setup leading failed!\n");
2792 LOAD_ERROR_EXIT(bp, load_error3);
2793 }
2795 /* set up the rest of the queues */
2796 for_each_nondefault_eth_queue(bp, i) {
2797 if (IS_PF(bp))
2798 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2799 else /* VF */
2800 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2801 if (rc) {
2802 BNX2X_ERR("Queue %d setup failed\n", i);
2803 LOAD_ERROR_EXIT(bp, load_error3);
2804 }
2805 }
2808 rc = bnx2x_init_rss(bp);
2809 if (rc) {
2810 BNX2X_ERR("PF RSS init failed\n");
2811 LOAD_ERROR_EXIT(bp, load_error3);
2812 }
2814 /* Now when Clients are configured we are ready to work */
2815 bp->state = BNX2X_STATE_OPEN;
2817 /* Configure a ucast MAC */
2818 if (IS_PF(bp))
2819 rc = bnx2x_set_eth_mac(bp, true);
2820 else /* vf */
2821 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2822 true);
2823 if (rc) {
2824 BNX2X_ERR("Setting Ethernet MAC failed\n");
2825 LOAD_ERROR_EXIT(bp, load_error3);
2826 }
2828 if (IS_PF(bp) && bp->pending_max) {
2829 bnx2x_update_max_mf_config(bp, bp->pending_max);
2830 bp->pending_max = 0;
2833 bp->force_link_down = false;
2834 if (bp->port.pmf) {
2835 rc = bnx2x_initial_phy_init(bp, load_mode);
2836 if (rc)
2837 LOAD_ERROR_EXIT(bp, load_error3);
2838 }
2839 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2841 /* Start fast path */
2843 /* Re-configure vlan filters */
2844 rc = bnx2x_vlan_reconfigure_vid(bp);
2845 if (rc)
2846 LOAD_ERROR_EXIT(bp, load_error3);
2848 /* Initialize Rx filter. */
2849 bnx2x_set_rx_mode_inner(bp);
2851 if (bp->flags & PTP_SUPPORTED) {
2853 bnx2x_configure_ptp_filters(bp);
2856 switch (load_mode) {
2857 case LOAD_NORMAL:
2858 /* Tx queue should be only re-enabled */
2859 netif_tx_wake_all_queues(bp->dev);
2860 break;
2862 case LOAD_OPEN:
2863 netif_tx_start_all_queues(bp->dev);
2864 smp_mb__after_atomic();
2865 break;
2867 case LOAD_DIAG:
2868 case LOAD_LOOPBACK_EXT:
2869 bp->state = BNX2X_STATE_DIAG;
2870 break;
2872 default:
2873 break;
2874 }
2876 if (bp->port.pmf)
2877 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2878 else
2879 bnx2x__link_status_update(bp);
2881 /* start the timer */
2882 mod_timer(&bp->timer, jiffies + bp->current_interval);
2884 if (CNIC_ENABLED(bp))
2885 bnx2x_load_cnic(bp);
2888 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2890 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2891 /* mark driver is loaded in shmem2 */
2892 u32 val;
2893 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2894 val &= ~DRV_FLAGS_MTU_MASK;
2895 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2896 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2897 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2898 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2901 /* Wait for all pending SP commands to complete */
2902 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2903 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2904 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2905 return -EBUSY;
2906 }
2908 /* Update driver data for On-Chip MFW dump. */
2909 if (IS_PF(bp))
2910 bnx2x_update_mfw_dump(bp);
2912 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2913 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2914 bnx2x_dcbx_init(bp, false);
2916 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2917 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2919 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2923 #ifndef BNX2X_STOP_ON_ERROR
2924 load_error3:
2925 if (IS_PF(bp)) {
2926 bnx2x_int_disable_sync(bp, 1);
2928 /* Clean queueable objects */
2929 bnx2x_squeeze_objects(bp);
2930 }
2932 /* Free SKBs, SGEs, TPA pool and driver internals */
2933 bnx2x_free_skbs(bp);
2934 for_each_rx_queue(bp, i)
2935 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2937 /* Release IRQs */
2938 bnx2x_free_irq(bp);
2939 load_error2:
2940 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2941 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2942 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2943 }
2945 bp->port.pmf = 0;
2946 load_error1:
2947 bnx2x_napi_disable(bp);
2948 bnx2x_del_all_napi(bp);
2950 /* clear pf_load status, as it was already set */
2951 if (IS_PF(bp))
2952 bnx2x_clear_pf_load(bp);
2953 load_error0:
2954 bnx2x_free_fw_stats_mem(bp);
2955 bnx2x_free_fp_mem(bp);
2956 bnx2x_free_mem(bp);
2958 return rc;
2959 #endif /* ! BNX2X_STOP_ON_ERROR */
2962 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2963 {
2964 int i, cos, rc;
2966 /* Wait until tx fastpath tasks complete */
2967 for_each_tx_queue(bp, i) {
2968 struct bnx2x_fastpath *fp = &bp->fp[i];
2970 for_each_cos_in_tx_queue(fp, cos)
2971 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2972 if (rc)
2973 return rc;
2974 }
2976 return 0;
2977 }
2978 /* must be called with rtnl_lock */
2979 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2982 bool global = false;
2984 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2986 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2987 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2989 /* mark driver is unloaded in shmem2 */
2990 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2991 u32 val;
2992 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2993 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2994 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2997 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2998 (bp->state == BNX2X_STATE_CLOSED ||
2999 bp->state == BNX2X_STATE_ERROR)) {
3000 /* We can get here if the driver has been unloaded
3001 * during parity error recovery and is either waiting for a
3002 * leader to complete or for other functions to unload and
3003 * then ifdown has been issued. In this case we want to
3004 * unload and let other functions complete a recovery
3005 * process.
3006 */
3007 bp->recovery_state = BNX2X_RECOVERY_DONE;
3008 bp->is_leader = 0;
3009 bnx2x_release_leader_lock(bp);
3010 smp_mb();
3012 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3013 BNX2X_ERR("Can't unload in closed or error state\n");
3014 return -EINVAL;
3015 }
3017 /* Nothing to do during unload if the previous bnx2x_nic_load()
3018 * has not completed successfully - all resources are released.
3019 *
3020 * We can get here only after an unsuccessful ndo_* callback, during which
3021 * dev->IFF_UP flag is still on.
3022 */
3023 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3026 /* It's important to set the bp->state to the value different from
3027 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3028 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3030 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3033 /* indicate to VFs that the PF is going down */
3034 bnx2x_iov_channel_down(bp);
3036 if (CNIC_LOADED(bp))
3037 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3040 bnx2x_tx_disable(bp);
3041 netdev_reset_tc(bp->dev);
3043 bp->rx_mode = BNX2X_RX_MODE_NONE;
3045 del_timer_sync(&bp->timer);
3047 if (IS_PF(bp) && !BP_NOMCP(bp)) {
3048 /* Set ALWAYS_ALIVE bit in shmem */
3049 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3050 bnx2x_drv_pulse(bp);
3051 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3052 bnx2x_save_statistics(bp);
3055 /* wait till consumers catch up with producers in all queues.
3056 * If we're recovering, FW can't write to host so no reason
3057 * to wait for the queues to complete all Tx.
3059 if (unload_mode != UNLOAD_RECOVERY)
3060 bnx2x_drain_tx_queues(bp);
3062 /* if VF indicate to PF this function is going down (PF will delete sp
3063 * elements and clear initializations
3066 bnx2x_clear_vlan_info(bp);
3067 bnx2x_vfpf_close_vf(bp);
3068 } else if (unload_mode != UNLOAD_RECOVERY) {
3069 /* if this is a normal/close unload need to clean up chip*/
3070 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3072 /* Send the UNLOAD_REQUEST to the MCP */
3073 bnx2x_send_unload_req(bp, unload_mode);
3075 /* Prevent transactions to host from the functions on the
3076 * engine that doesn't reset global blocks in case of global
3077 * attention once global blocks are reset and gates are opened
3078 * (the engine whose leader will perform the recovery
3079 * last).
3080 */
3081 if (!CHIP_IS_E1x(bp))
3082 bnx2x_pf_disable(bp);
3084 /* Disable HW interrupts, NAPI */
3085 bnx2x_netif_stop(bp, 1);
3086 /* Delete all NAPI objects */
3087 bnx2x_del_all_napi(bp);
3088 if (CNIC_LOADED(bp))
3089 bnx2x_del_all_napi_cnic(bp);
3093 /* Report UNLOAD_DONE to MCP */
3094 bnx2x_send_unload_done(bp, false);
3098 * At this stage no more interrupts will arrive so we may safely clean
3099 * the queueable objects here in case they failed to get cleaned so far.
3101 if (IS_PF(bp))
3102 bnx2x_squeeze_objects(bp);
3104 /* There should be no more pending SP commands at this stage */
3109 /* clear pending work in rtnl task */
3110 bp->sp_rtnl_state = 0;
3113 /* Free SKBs, SGEs, TPA pool and driver internals */
3114 bnx2x_free_skbs(bp);
3115 if (CNIC_LOADED(bp))
3116 bnx2x_free_skbs_cnic(bp);
3117 for_each_rx_queue(bp, i)
3118 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3120 bnx2x_free_fp_mem(bp);
3121 if (CNIC_LOADED(bp))
3122 bnx2x_free_fp_mem_cnic(bp);
3125 if (CNIC_LOADED(bp))
3126 bnx2x_free_mem_cnic(bp);
3130 bp->state = BNX2X_STATE_CLOSED;
3131 bp->cnic_loaded = false;
3133 /* Clear driver version indication in shmem */
3134 if (IS_PF(bp) && !BP_NOMCP(bp))
3135 bnx2x_update_mng_version(bp);
3137 /* Check if there are pending parity attentions. If there are - set
3138 * RECOVERY_IN_PROGRESS.
3140 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3141 bnx2x_set_reset_in_progress(bp);
3143 /* Set RESET_IS_GLOBAL if needed */
3145 bnx2x_set_reset_global(bp);
3148 /* The last driver must disable a "close the gate" if there is no
3149 * parity attention or "process kill" pending.
3150 */
3151 if (IS_PF(bp) &&
3152 !bnx2x_clear_pf_load(bp) &&
3153 bnx2x_reset_is_done(bp, BP_PATH(bp)))
3154 bnx2x_disable_close_the_gate(bp);
3156 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3161 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3165 /* If there is no power capability, silently succeed */
3166 if (!bp->pdev->pm_cap) {
3167 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3171 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3173 switch (state) {
3174 case PCI_D0:
3175 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3176 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3177 PCI_PM_CTRL_PME_STATUS));
3179 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3180 /* delay required during transition out of D3hot */
3181 msleep(20);
3182 break;
3184 case PCI_D3hot:
3185 /* If there are other clients above don't
3186 shut down the power */
3187 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3188 return 0;
3189 /* Don't shut down the power for emulation and FPGA */
3190 if (CHIP_REV_IS_SLOW(bp))
3191 return 0;
3193 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3194 pmcsr |= 3;
3196 if (bp->wol)
3197 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3199 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3202 /* No more memory access after this point until
3203 * device is brought back to D0.
3204 */
3205 break;
3207 default:
3208 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3209 return -EINVAL;
3210 }
3211 return 0;
3212 }
3215 * net_device service functions
3217 static int bnx2x_poll(struct napi_struct *napi, int budget)
3218 {
3219 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3220 napi);
3221 struct bnx2x *bp = fp->bp;
3222 int rx_work_done;
3223 u8 cos;
3225 #ifdef BNX2X_STOP_ON_ERROR
3226 if (unlikely(bp->panic)) {
3227 napi_complete(napi);
3228 return 0;
3229 }
3230 #endif
3231 for_each_cos_in_tx_queue(fp, cos)
3232 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3233 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3235 rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
3237 if (rx_work_done < budget) {
3238 /* No need to update SB for FCoE L2 ring as long as
3239 * it's connected to the default SB and the SB
3240 * has been updated when NAPI was scheduled.
3242 if (IS_FCOE_FP(fp)) {
3243 napi_complete(napi);
3244 } else {
3245 bnx2x_update_fpsb_idx(fp);
3246 /* bnx2x_has_rx_work() reads the status block,
3247 * thus we need to ensure that status block indices
3248 * have been actually read (bnx2x_update_fpsb_idx)
3249 * prior to this check (bnx2x_has_rx_work) so that
3250 * we won't write the "newer" value of the status block
3251 * to IGU (if there was a DMA right after
3252 * bnx2x_has_rx_work and if there is no rmb, the memory
3253 * reading (bnx2x_update_fpsb_idx) may be postponed
3254 * to right before bnx2x_ack_sb). In this case there
3255 * will never be another interrupt until there is
3256 * another update of the status block, while there
3257 * is still unhandled work.
3258 */
3259 rmb();
3261 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3262 napi_complete(napi);
3263 /* Re-enable interrupts */
3264 DP(NETIF_MSG_RX_STATUS,
3265 "Update index to %d\n", fp->fp_hc_idx);
3266 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3267 le16_to_cpu(fp->fp_hc_idx),
3268 IGU_INT_ENABLE, 1);
3269 } else {
3270 rx_work_done = budget;
3271 }
3272 }
3273 }
3275 return rx_work_done;
3276 }
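/* NAPI contract recap (generic kernel behaviour, not bnx2x-specific):
 * returning a value smaller than budget tells the core this queue is
 * drained and napi_complete() was called, re-arming interrupts via
 * bnx2x_ack_sb() above; returning the full budget keeps the poll loop
 * scheduled without re-enabling interrupts.
 */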
3278 /* we split the first BD into headers and data BDs
3279 * to ease the pain of our fellow microcode engineers
3280 * we use one mapping for both BDs
3282 static u16 bnx2x_tx_split(struct bnx2x *bp,
3283 struct bnx2x_fp_txdata *txdata,
3284 struct sw_tx_bd *tx_buf,
3285 struct eth_tx_start_bd **tx_bd, u16 hlen,
3286 u16 bd_prod)
3287 {
3288 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3289 struct eth_tx_bd *d_tx_bd;
3290 dma_addr_t mapping;
3291 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3293 /* first fix first BD */
3294 h_tx_bd->nbytes = cpu_to_le16(hlen);
3296 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3297 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3299 /* now get a new data BD
3300 * (after the pbd) and fill it */
3301 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3302 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3304 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3305 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3307 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3308 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3309 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3311 /* this marks the BD as one that has no individual mapping */
3312 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3314 DP(NETIF_MSG_TX_QUEUED,
3315 "TSO split data size is %d (%x:%x)\n",
3316 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3318 /* update tx_bd */
3319 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3321 return bd_prod;
3322 }
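/* Worked example with hypothetical numbers: for a TSO packet whose linear
 * part is 1514 bytes and hlen = 66 (ETH + IP + TCP headers), the start BD
 * is trimmed to 66 bytes and the new data BD covers the remaining 1448
 * bytes at mapping + 66: one DMA mapping shared by two BDs, as described
 * in the comment above the function.
 */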
3324 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3325 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3326 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3328 __sum16 tsum = (__force __sum16) csum;
3330 if (fix > 0)
3331 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3332 csum_partial(t_header - fix, fix, 0)));
3334 else if (fix < 0)
3335 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3336 csum_partial(t_header, -fix, 0)));
3338 return bswab16(tsum);
3339 }
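/* Sketch of the fixup math (relying on standard csum_partial semantics):
 * when fix > 0 the hardware summed fix extra bytes before the transport
 * header, so their partial sum is subtracted; when fix < 0 bytes were
 * missed and are added back. Folding and byte-swapping then yields the
 * pseudo checksum in the byte order the FW expects.
 */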
3341 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3347 if (skb->ip_summed != CHECKSUM_PARTIAL)
3350 protocol = vlan_get_protocol(skb);
3351 if (protocol == htons(ETH_P_IPV6)) {
3353 prot = ipv6_hdr(skb)->nexthdr;
3356 prot = ip_hdr(skb)->protocol;
3359 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3360 if (inner_ip_hdr(skb)->version == 6) {
3361 rc |= XMIT_CSUM_ENC_V6;
3362 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3363 rc |= XMIT_CSUM_TCP;
3365 rc |= XMIT_CSUM_ENC_V4;
3366 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3367 rc |= XMIT_CSUM_TCP;
3370 if (prot == IPPROTO_TCP)
3371 rc |= XMIT_CSUM_TCP;
3373 if (skb_is_gso(skb)) {
3374 if (skb_is_gso_v6(skb)) {
3375 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3376 if (rc & XMIT_CSUM_ENC)
3377 rc |= XMIT_GSO_ENC_V6;
3379 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3380 if (rc & XMIT_CSUM_ENC)
3381 rc |= XMIT_GSO_ENC_V4;
3388 /* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
3389 #define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4
3391 /* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3392 #define BNX2X_NUM_TSO_WIN_SUB_BDS 3
3394 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3395 /* check if packet requires linearization (packet is too fragmented)
3396 no need to check fragmentation if page size > 8K (there will be no
3397 violation to FW restrictions) */
3398 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3401 int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3402 int to_copy = 0, hlen = 0;
3404 if (xmit_type & XMIT_GSO_ENC)
3405 num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3407 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3408 if (xmit_type & XMIT_GSO) {
3409 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3410 int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3411 /* Number of windows to check */
3412 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3417 /* Headers length */
3418 if (xmit_type & XMIT_GSO_ENC)
3419 hlen = (int)(skb_inner_transport_header(skb) -
3421 inner_tcp_hdrlen(skb);
3423 hlen = (int)(skb_transport_header(skb) -
3424 skb->data) + tcp_hdrlen(skb);
3426 /* Amount of data (w/o headers) on linear part of SKB*/
3427 first_bd_sz = skb_headlen(skb) - hlen;
3429 wnd_sum = first_bd_sz;
3431 /* Calculate the first sum - it's special */
3432 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3434 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3436 /* If there was data on linear skb data - check it */
3437 if (first_bd_sz > 0) {
3438 if (unlikely(wnd_sum < lso_mss)) {
3439 to_copy = 1;
3440 goto exit_lbl;
3441 }
3443 wnd_sum -= first_bd_sz;
3444 }
3446 /* Others are easier: run through the frag list and
3447 check all windows */
3448 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3450 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3452 if (unlikely(wnd_sum < lso_mss)) {
3453 to_copy = 1;
3454 break;
3455 }
3456 wnd_sum -=
3457 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3458 }
3459 } else {
3460 /* in non-LSO too fragmented packet should always
3461 be linearized */
3462 to_copy = 1;
3463 }
3464 }
3466 exit_lbl:
3467 if (unlikely(to_copy))
3468 DP(NETIF_MSG_TX_QUEUED,
3469 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3470 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3471 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
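/* Worked example of the sliding-window check (illustrative constants):
 * with MAX_FETCH_BD = 13 and num_tso_win_sub = 3, wnd_size = 10, so every
 * run of 10 consecutive BDs must carry at least gso_size (lso_mss) bytes;
 * if any window falls short, the packet is linearized to satisfy the FW
 * restriction.
 */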
3473 return to_copy;
3474 }
3475 #endif
3477 /**
3478 * bnx2x_set_pbd_gso - update PBD in GSO case.
3482 * @xmit_type: xmit flags
3484 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3485 struct eth_tx_parse_bd_e1x *pbd,
3488 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3489 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3490 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3492 if (xmit_type & XMIT_GSO_V4) {
3493 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3494 pbd->tcp_pseudo_csum =
3495 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3497 0, IPPROTO_TCP, 0));
3499 pbd->tcp_pseudo_csum =
3500 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3501 &ipv6_hdr(skb)->daddr,
3502 0, IPPROTO_TCP, 0));
3505 pbd->global_data |=
3506 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3507 }
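/* Note on the flag above: the pseudo checksum deliberately excludes the
 * length field (PSEUDO_CS_WITHOUT_LEN, as the name suggests), since
 * per-segment lengths are only known after LSO slicing and are folded in
 * by the device for each generated segment.
 */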
3510 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3512 * @bp: driver handle
3514 * @parsing_data: data to be updated
3515 * @xmit_type: xmit flags
3517 * 57712/578xx related, when skb has encapsulation
3519 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3520 u32 *parsing_data, u32 xmit_type)
3523 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3524 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3525 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3527 if (xmit_type & XMIT_CSUM_TCP) {
3528 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3529 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3530 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3532 return skb_inner_transport_header(skb) +
3533 inner_tcp_hdrlen(skb) - skb->data;
3536 /* We support checksum offload for TCP and UDP only.
3537 * No need to pass the UDP header length - it's a constant.
3539 return skb_inner_transport_header(skb) +
3540 sizeof(struct udphdr) - skb->data;
3544 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3546 * @bp: driver handle
3548 * @parsing_data: data to be updated
3549 * @xmit_type: xmit flags
3551 * 57712/578xx related
3553 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3554 u32 *parsing_data, u32 xmit_type)
3557 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3558 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3559 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3561 if (xmit_type & XMIT_CSUM_TCP) {
3562 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3563 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3564 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3566 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3568 /* We support checksum offload for TCP and UDP only.
3569 * No need to pass the UDP header length - it's a constant.
3571 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3574 /* set FW indication according to inner or outer protocols if tunneled */
3575 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3576 struct eth_tx_start_bd *tx_start_bd,
3579 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3581 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3582 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3584 if (!(xmit_type & XMIT_CSUM_TCP))
3585 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3589 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3591 * @bp: driver handle
3593 * @pbd: parse BD to be updated
3594 * @xmit_type: xmit flags
3596 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3597 struct eth_tx_parse_bd_e1x *pbd,
3600 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3602 /* for now NS flag is not used in Linux */
3603 pbd->global_data =
3604 cpu_to_le16(hlen |
3605 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3606 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3608 pbd->ip_hlen_w = (skb_transport_header(skb) -
3609 skb_network_header(skb)) >> 1;
3611 hlen += pbd->ip_hlen_w;
3613 /* We support checksum offload for TCP and UDP only */
3614 if (xmit_type & XMIT_CSUM_TCP)
3615 hlen += tcp_hdrlen(skb) / 2;
3617 hlen += sizeof(struct udphdr) / 2;
3619 pbd->total_hlen_w = cpu_to_le16(hlen);
3622 if (xmit_type & XMIT_CSUM_TCP) {
3623 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3626 s8 fix = SKB_CS_OFF(skb); /* signed! */
3628 DP(NETIF_MSG_TX_QUEUED,
3629 "hlen %d fix %d csum before fix %x\n",
3630 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3632 /* HW bug: fixup the CSUM */
3633 pbd->tcp_pseudo_csum =
3634 bnx2x_csum_fix(skb_transport_header(skb),
3635 SKB_CS(skb), fix);
3637 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3638 pbd->tcp_pseudo_csum);
3644 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3645 struct eth_tx_parse_bd_e2 *pbd_e2,
3646 struct eth_tx_parse_2nd_bd *pbd2,
3651 u8 outerip_off, outerip_len = 0;
3653 /* from outer IP to transport */
3654 hlen_w = (skb_inner_transport_header(skb) -
3655 skb_network_header(skb)) >> 1;
3658 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3660 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3662 /* outer IP header info */
3663 if (xmit_type & XMIT_CSUM_V4) {
3664 struct iphdr *iph = ip_hdr(skb);
3665 u32 csum = (__force u32)(~iph->check) -
3666 (__force u32)iph->tot_len -
3667 (__force u32)iph->frag_off;
3669 outerip_len = iph->ihl << 1;
3671 pbd2->fw_ip_csum_wo_len_flags_frag =
3672 bswab16(csum_fold((__force __wsum)csum));
3674 pbd2->fw_ip_hdr_to_payload_w =
3675 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3676 pbd_e2->data.tunnel_data.flags |=
3677 ETH_TUNNEL_DATA_IPV6_OUTER;
3680 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3682 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3684 /* inner IP header info */
3685 if (xmit_type & XMIT_CSUM_ENC_V4) {
3686 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3688 pbd_e2->data.tunnel_data.pseudo_csum =
3689 bswab16(~csum_tcpudp_magic(
3690 inner_ip_hdr(skb)->saddr,
3691 inner_ip_hdr(skb)->daddr,
3692 0, IPPROTO_TCP, 0));
3694 pbd_e2->data.tunnel_data.pseudo_csum =
3695 bswab16(~csum_ipv6_magic(
3696 &inner_ipv6_hdr(skb)->saddr,
3697 &inner_ipv6_hdr(skb)->daddr,
3698 0, IPPROTO_TCP, 0));
3701 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3703 *global_data |=
3704 outerip_off |
3705 (outerip_len <<
3706 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3707 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3708 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3710 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3711 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3712 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3716 static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3719 struct ipv6hdr *ipv6;
3721 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3724 if (xmit_type & XMIT_GSO_ENC_V6)
3725 ipv6 = inner_ipv6_hdr(skb);
3726 else /* XMIT_GSO_V6 */
3727 ipv6 = ipv6_hdr(skb);
3729 if (ipv6->nexthdr == NEXTHDR_IPV6)
3730 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3733 /* called with netif_tx_lock
3734 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3735 * netif_wake_queue()
3737 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3739 struct bnx2x *bp = netdev_priv(dev);
3741 struct netdev_queue *txq;
3742 struct bnx2x_fp_txdata *txdata;
3743 struct sw_tx_bd *tx_buf;
3744 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3745 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3746 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3747 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3748 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3749 u32 pbd_e2_parsing_data = 0;
3750 u16 pkt_prod, bd_prod;
3753 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3756 __le16 pkt_size = 0;
3758 u8 mac_type = UNICAST_ADDRESS;
3760 #ifdef BNX2X_STOP_ON_ERROR
3761 if (unlikely(bp->panic))
3762 return NETDEV_TX_BUSY;
3765 txq_index = skb_get_queue_mapping(skb);
3766 txq = netdev_get_tx_queue(dev, txq_index);
3768 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3770 txdata = &bp->bnx2x_txq[txq_index];
3772 /* enable this debug print to view the transmission queue being used
3773 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3774 txq_index, fp_index, txdata_index); */
3776 /* enable this debug print to view the transmission details
3777 DP(NETIF_MSG_TX_QUEUED,
3778 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3779 txdata->cid, fp_index, txdata_index, txdata, fp); */
3781 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3782 skb_shinfo(skb)->nr_frags +
3784 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3785 /* Handle special storage cases separately */
3786 if (txdata->tx_ring_size == 0) {
3787 struct bnx2x_eth_q_stats *q_stats =
3788 bnx2x_fp_qstats(bp, txdata->parent_fp);
3789 q_stats->driver_filtered_tx_pkt++;
3790 dev_kfree_skb(skb);
3791 return NETDEV_TX_OK;
3792 }
3793 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3794 netif_tx_stop_queue(txq);
3795 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3797 return NETDEV_TX_BUSY;
3800 DP(NETIF_MSG_TX_QUEUED,
3801 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3802 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3803 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3806 eth = (struct ethhdr *)skb->data;
3808 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3809 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3810 if (is_broadcast_ether_addr(eth->h_dest))
3811 mac_type = BROADCAST_ADDRESS;
3813 mac_type = MULTICAST_ADDRESS;
3816 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3817 /* First, check if we need to linearize the skb (due to FW
3818 restrictions). No need to check fragmentation if page size > 8K
3819 (there will be no violation to FW restrictions) */
3820 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3821 /* Statistics of linearization */
3822 bp->lin_cnt++;
3823 if (skb_linearize(skb) != 0) {
3824 DP(NETIF_MSG_TX_QUEUED,
3825 "SKB linearization failed - silently dropping this SKB\n");
3826 dev_kfree_skb_any(skb);
3827 return NETDEV_TX_OK;
3828 }
3829 }
3830 #endif
3831 /* Map skb linear data for DMA */
3832 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3833 skb_headlen(skb), DMA_TO_DEVICE);
3834 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3835 DP(NETIF_MSG_TX_QUEUED,
3836 "SKB mapping failed - silently dropping this SKB\n");
3837 dev_kfree_skb_any(skb);
3838 return NETDEV_TX_OK;
3839 }
3840 /*
3841 Please read carefully. First we use one BD which we mark as start,
3842 then we have a parsing info BD (used for TSO or xsum),
3843 and only then we have the rest of the TSO BDs.
3844 (don't forget to mark the last one as last,
3845 and to unmap only AFTER you write to the BD ...)
3846 And above all, all pbd sizes are in words - NOT DWORDS!
3847 */
3849 /* get current pkt produced now - advance it just before sending packet
3850 * since mapping of pages may fail and cause packet to be dropped
3852 pkt_prod = txdata->tx_pkt_prod;
3853 bd_prod = TX_BD(txdata->tx_bd_prod);
3855 /* get a tx_buf and first BD
3856 * tx_start_bd may be changed during SPLIT,
3857 * but first_bd will always stay first
3859 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3860 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3861 first_bd = tx_start_bd;
3863 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3865 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3866 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3867 bp->eth_stats.ptp_skip_tx_ts++;
3868 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3869 } else if (bp->ptp_tx_skb) {
3870 bp->eth_stats.ptp_skip_tx_ts++;
3871 dev_err_once(&bp->dev->dev,
3872 "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
3874 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3875 /* schedule check for Tx timestamp */
3876 bp->ptp_tx_skb = skb_get(skb);
3877 bp->ptp_tx_start = jiffies;
3878 schedule_work(&bp->ptp_task);
3879 }
3880 }
3882 /* header nbd: indirectly zero other flags! */
3883 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3885 /* remember the first BD of the packet */
3886 tx_buf->first_bd = txdata->tx_bd_prod;
3887 tx_buf->skb = skb;
3888 tx_buf->flags = 0;
3890 DP(NETIF_MSG_TX_QUEUED,
3891 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3892 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3894 if (skb_vlan_tag_present(skb)) {
3895 tx_start_bd->vlan_or_ethertype =
3896 cpu_to_le16(skb_vlan_tag_get(skb));
3897 tx_start_bd->bd_flags.as_bitfield |=
3898 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3900 /* when transmitting in a vf, start bd must hold the ethertype
3901 * for fw to enforce it
3902 */
3903 u16 vlan_tci = 0;
3904 #ifndef BNX2X_STOP_ON_ERROR
3905 if (IS_VF(bp)) {
3906 #endif
3907 /* Still need to consider inband vlan for enforced */
3908 if (__vlan_get_tag(skb, &vlan_tci)) {
3909 tx_start_bd->vlan_or_ethertype =
3910 cpu_to_le16(ntohs(eth->h_proto));
3911 } else {
3912 tx_start_bd->bd_flags.as_bitfield |=
3913 (X_ETH_INBAND_VLAN <<
3914 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3915 tx_start_bd->vlan_or_ethertype =
3916 cpu_to_le16(vlan_tci);
3917 }
3918 #ifndef BNX2X_STOP_ON_ERROR
3919 } else {
3920 /* used by FW for packet accounting */
3921 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3922 }
3923 #endif
3924 }
3926 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3928 /* turn on parsing and get a BD */
3929 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3931 if (xmit_type & XMIT_CSUM)
3932 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3934 if (!CHIP_IS_E1x(bp)) {
3935 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3936 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3938 if (xmit_type & XMIT_CSUM_ENC) {
3939 u16 global_data = 0;
3941 /* Set PBD in enc checksum offload case */
3942 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3943 &pbd_e2_parsing_data,
3944 xmit_type);
3946 /* turn on 2nd parsing and get a BD */
3947 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3949 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3951 memset(pbd2, 0, sizeof(*pbd2));
3953 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3954 (skb_inner_network_header(skb) -
3955 skb->data) >> 1;
3957 if (xmit_type & XMIT_GSO_ENC)
3958 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3959 &global_data,
3960 xmit_type);
3962 pbd2->global_data = cpu_to_le16(global_data);
3964 /* add addition parse BD indication to start BD */
3965 SET_FLAG(tx_start_bd->general_data,
3966 ETH_TX_START_BD_PARSE_NBDS, 1);
3967 /* set encapsulation flag in start BD */
3968 SET_FLAG(tx_start_bd->general_data,
3969 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3971 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3974 } else if (xmit_type & XMIT_CSUM) {
3975 /* Set PBD in checksum offload case w/o encapsulation */
3976 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3977 &pbd_e2_parsing_data,
3978 xmit_type);
3979 }
3981 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3982 /* Add the macs to the parsing BD if this is a vf or if
3983 * Tx Switching is enabled.
3984 */
3985 if (IS_VF(bp)) {
3986 /* override GRE parameters in BD */
3987 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3988 &pbd_e2->data.mac_addr.src_mid,
3989 &pbd_e2->data.mac_addr.src_lo,
3990 eth->h_source);
3992 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3993 &pbd_e2->data.mac_addr.dst_mid,
3994 &pbd_e2->data.mac_addr.dst_lo,
3995 eth->h_dest);
3996 } else {
3997 if (bp->flags & TX_SWITCHING)
3998 bnx2x_set_fw_mac_addr(
3999 &pbd_e2->data.mac_addr.dst_hi,
4000 &pbd_e2->data.mac_addr.dst_mid,
4001 &pbd_e2->data.mac_addr.dst_lo,
4002 eth->h_dest);
4003 #ifdef BNX2X_STOP_ON_ERROR
4004 /* Enforce security is always set in Stop on Error -
4005 * source mac should be present in the parsing BD
4006 */
4007 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4008 &pbd_e2->data.mac_addr.src_mid,
4009 &pbd_e2->data.mac_addr.src_lo,
4010 eth->h_source);
4011 #endif
4012 }
4014 SET_FLAG(pbd_e2_parsing_data,
4015 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4017 u16 global_data = 0;
4018 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4019 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4020 /* Set PBD in checksum offload case */
4021 if (xmit_type & XMIT_CSUM)
4022 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4024 SET_FLAG(global_data,
4025 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4026 pbd_e1x->global_data |= cpu_to_le16(global_data);
4029 /* Setup the data pointer of the first BD of the packet */
4030 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4031 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4032 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4033 pkt_size = tx_start_bd->nbytes;
4035 DP(NETIF_MSG_TX_QUEUED,
4036 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
4037 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4038 le16_to_cpu(tx_start_bd->nbytes),
4039 tx_start_bd->bd_flags.as_bitfield,
4040 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4042 if (xmit_type & XMIT_GSO) {
4044 DP(NETIF_MSG_TX_QUEUED,
4045 "TSO packet len %d hlen %d total len %d tso size %d\n",
4046 skb->len, hlen, skb_headlen(skb),
4047 skb_shinfo(skb)->gso_size);
4049 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4051 if (unlikely(skb_headlen(skb) > hlen)) {
4052 nbd++;
4053 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4054 &tx_start_bd, hlen,
4055 bd_prod);
4056 }
4057 if (!CHIP_IS_E1x(bp))
4058 pbd_e2_parsing_data |=
4059 (skb_shinfo(skb)->gso_size <<
4060 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4061 ETH_TX_PARSE_BD_E2_LSO_MSS;
4062 else
4063 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4064 }
4066 /* Set the PBD's parsing_data field if not zero
4067 * (for the chips newer than 57711).
4069 if (pbd_e2_parsing_data)
4070 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4072 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4074 /* Handle fragmented skb */
4075 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4076 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4078 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4079 skb_frag_size(frag), DMA_TO_DEVICE);
4080 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4081 unsigned int pkts_compl = 0, bytes_compl = 0;
4083 DP(NETIF_MSG_TX_QUEUED,
4084 "Unable to map page - dropping packet...\n");
4086 /* we need to unmap all buffers already mapped
4087 * for this SKB;
4088 * first_bd->nbd needs to be properly updated
4089 * before call to bnx2x_free_tx_pkt
4091 first_bd->nbd = cpu_to_le16(nbd);
4092 bnx2x_free_tx_pkt(bp, txdata,
4093 TX_BD(txdata->tx_pkt_prod),
4094 &pkts_compl, &bytes_compl);
4095 return NETDEV_TX_OK;
4096 }
4098 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4099 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4100 if (total_pkt_bd == NULL)
4101 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4103 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4104 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4105 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4106 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4109 DP(NETIF_MSG_TX_QUEUED,
4110 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4111 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4112 le16_to_cpu(tx_data_bd->nbytes));
4115 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4117 /* update with actual num BDs */
4118 first_bd->nbd = cpu_to_le16(nbd);
4120 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4122 /* now send a tx doorbell, counting the next BD
4123 * if the packet contains or ends with it
4125 if (TX_BD_POFF(bd_prod) < nbd)
4126 nbd++;
4128 /* total_pkt_bytes should be set on the first data BD if
4129 * it's not an LSO packet and there is more than one
4130 * data BD. In this case pkt_size is limited by an MTU value.
4131 * However we prefer to set it for an LSO packet (while we don't
4132 * have to) in order to save some CPU cycles in a non-LSO
4133 * case, which we care much more about.
4134 */
4135 if (total_pkt_bd != NULL)
4136 total_pkt_bd->total_pkt_bytes = pkt_size;
4138 if (pbd_e1x)
4139 DP(NETIF_MSG_TX_QUEUED,
4140 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4141 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4142 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4143 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4144 le16_to_cpu(pbd_e1x->total_hlen_w));
4145 if (pbd_e2)
4146 DP(NETIF_MSG_TX_QUEUED,
4147 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
4149 pbd_e2->data.mac_addr.dst_hi,
4150 pbd_e2->data.mac_addr.dst_mid,
4151 pbd_e2->data.mac_addr.dst_lo,
4152 pbd_e2->data.mac_addr.src_hi,
4153 pbd_e2->data.mac_addr.src_mid,
4154 pbd_e2->data.mac_addr.src_lo,
4155 pbd_e2->parsing_data);
4156 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4158 netdev_tx_sent_queue(txq, skb->len);
4160 skb_tx_timestamp(skb);
4162 txdata->tx_pkt_prod++;
4164 * Make sure that the BD data is updated before updating the producer
4165 * since FW might read the BD right after the producer is updated.
4166 * This is only applicable for weak-ordered memory model archs such
4167 * as IA-64. The following barrier is also mandatory since FW will
4168 * assume packets must have BDs.
4169 */
4170 wmb();
4172 txdata->tx_db.data.prod += nbd;
4173 barrier();
4175 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4179 txdata->tx_bd_prod += nbd;
4181 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4182 netif_tx_stop_queue(txq);
4184 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4185 * ordering of set_bit() in netif_tx_stop_queue() and read of
4186 * txdata->tx_pkt_cons
4187 */
4188 smp_mb();
4189 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4190 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4191 netif_tx_wake_queue(txq);
4192 }
4193 txdata->tx_pkt++;
4195 return NETDEV_TX_OK;
4196 }
4198 void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4200 int mfw_vn = BP_FW_MB_IDX(bp);
4203 /* If the shmem shouldn't affect configuration, reflect */
4204 if (!IS_MF_BD(bp)) {
4205 int i;
4207 for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4208 c2s_map[i] = i;
4209 *c2s_default = 0;
4211 return;
4212 }
4214 tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4215 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4216 c2s_map[0] = tmp & 0xff;
4217 c2s_map[1] = (tmp >> 8) & 0xff;
4218 c2s_map[2] = (tmp >> 16) & 0xff;
4219 c2s_map[3] = (tmp >> 24) & 0xff;
4221 tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4222 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4223 c2s_map[4] = tmp & 0xff;
4224 c2s_map[5] = (tmp >> 8) & 0xff;
4225 c2s_map[6] = (tmp >> 16) & 0xff;
4226 c2s_map[7] = (tmp >> 24) & 0xff;
4228 tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4229 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4230 *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
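/* Unpacking example (made-up register value): if c2s_pcp_map_lower reads
 * back as 0x03020100 after the byte swap above, priorities 0..3 map to
 * outer CoS 0x00, 0x01, 0x02 and 0x03 - one byte per priority, lowest
 * byte first.
 */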
4231 }
4233 /**
4234 * bnx2x_setup_tc - routine to configure net_device for multi tc
4236 * @netdev: net device to configure
4237 * @tc: number of traffic classes to enable
4239 * callback connected to the ndo_setup_tc function pointer
4241 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4243 struct bnx2x *bp = netdev_priv(dev);
4244 u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4245 int cos, prio, count, offset;
4247 /* setup tc must be called under rtnl lock */
4248 ASSERT_RTNL();
4250 /* no traffic classes requested. Aborting */
4251 if (!num_tc) {
4252 netdev_reset_tc(dev);
4253 return 0;
4254 }
4256 /* requested to support too many traffic classes */
4257 if (num_tc > bp->max_cos) {
4258 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4259 num_tc, bp->max_cos);
4260 return -EINVAL;
4261 }
4263 /* declare amount of supported traffic classes */
4264 if (netdev_set_num_tc(dev, num_tc)) {
4265 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4266 return -EINVAL;
4267 }
4269 bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4271 /* configure priority to traffic class mapping */
4272 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4273 int outer_prio = c2s_map[prio];
4275 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4276 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4277 "mapping priority %d to tc %d\n",
4278 outer_prio, bp->prio_to_cos[outer_prio]);
4281 /* Use this configuration to differentiate tc0 from other COSes
4282 This can be used for ets or pfc, and save the effort of setting
4283 up a multi-class queue disc or negotiating DCBX with a switch
4284 netdev_set_prio_tc_map(dev, 0, 0);
4285 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4286 for (prio = 1; prio < 16; prio++) {
4287 netdev_set_prio_tc_map(dev, prio, 1);
4288 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4291 /* configure traffic class to transmission queue mapping */
4292 for (cos = 0; cos < bp->max_cos; cos++) {
4293 count = BNX2X_NUM_ETH_QUEUES(bp);
4294 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4295 netdev_set_tc_queue(dev, cos, count, offset);
4296 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4297 "mapping tc %d to offset %d count %d\n",
4298 cos, offset, count);
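/* Mapping example (hypothetical sizes): with 4 ETH queues and max_cos = 2,
 * tc 0 covers queues 0-3 (offset 0) and tc 1 covers queues 4-7 (offset 4),
 * so a skb classified to tc 1 is hashed onto txqs 4..7.
 */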
4299 }
4301 return 0;
4302 }
4304 int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
4305 struct tc_to_netdev *tc)
4306 {
4307 if (tc->type != TC_SETUP_MQPRIO)
4308 return -EINVAL;
4309 return bnx2x_setup_tc(dev, tc->tc);
4310 }
4312 /* called with rtnl_lock */
4313 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4315 struct sockaddr *addr = p;
4316 struct bnx2x *bp = netdev_priv(dev);
4319 if (!is_valid_ether_addr(addr->sa_data)) {
4320 BNX2X_ERR("Requested MAC address is not valid\n");
4321 return -EINVAL;
4322 }
4324 if (IS_MF_STORAGE_ONLY(bp)) {
4325 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4326 return -EINVAL;
4327 }
4329 if (netif_running(dev)) {
4330 rc = bnx2x_set_eth_mac(bp, false);
4331 if (rc)
4332 return rc;
4333 }
4335 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4337 if (netif_running(dev))
4338 rc = bnx2x_set_eth_mac(bp, true);
4340 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4341 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4343 return rc;
4344 }
4346 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4348 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4349 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4354 if (IS_FCOE_IDX(fp_index)) {
4355 memset(sb, 0, sizeof(union host_hc_status_block));
4356 fp->status_blk_mapping = 0;
4359 if (!CHIP_IS_E1x(bp))
4360 BNX2X_PCI_FREE(sb->e2_sb,
4361 bnx2x_fp(bp, fp_index,
4362 status_blk_mapping),
4363 sizeof(struct host_hc_status_block_e2));
4365 BNX2X_PCI_FREE(sb->e1x_sb,
4366 bnx2x_fp(bp, fp_index,
4367 status_blk_mapping),
4368 sizeof(struct host_hc_status_block_e1x));
4372 if (!skip_rx_queue(bp, fp_index)) {
4373 bnx2x_free_rx_bds(fp);
4375 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4376 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4377 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4378 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4379 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4381 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4382 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4383 sizeof(struct eth_fast_path_rx_cqe) *
4387 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4388 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4389 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4390 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4394 if (!skip_tx_queue(bp, fp_index)) {
4395 /* fastpath tx rings: tx_buf tx_desc */
4396 for_each_cos_in_tx_queue(fp, cos) {
4397 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4399 DP(NETIF_MSG_IFDOWN,
4400 "freeing tx memory of fp %d cos %d cid %d\n",
4401 fp_index, cos, txdata->cid);
4403 BNX2X_FREE(txdata->tx_buf_ring);
4404 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4405 txdata->tx_desc_mapping,
4406 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4409 /* end of fastpath */
static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
{
	int i;

	for_each_cnic_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}

void bnx2x_free_fp_mem(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}

static void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);

	if (!CHIP_IS_E1x(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}

/* Returns the number of actually allocated BDs */
static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
			      int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i, failure_cnt = 0;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;

	/* This routine is called only during fp init, so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
			failure_cnt++;
			continue;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= (i - failure_cnt));
	}

	if (failure_cnt)
		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
			  i - failure_cnt, fp->index);

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
			       cqe_ring_prod);

	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;

	return i - failure_cnt;
}

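/* Worked example for bnx2x_alloc_rx_bds() (illustrative numbers): with
 * rx_ring_size == 4096 and two failed allocations, the producers only
 * advance 4094 times, the two failures are accumulated into
 * eth_q_stats.rx_skb_alloc_failed and the function returns 4094, letting
 * the caller decide whether the shrunken ring is still big enough to
 * keep the queue.
 */
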
static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}

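/* The RCQ spans NUM_RCQ_RINGS pages; the loop above repurposes the last
 * CQE of each page as a next-page pointer to the page that follows, and
 * for i == NUM_RCQ_RINGS the index wraps (i % NUM_RCQ_RINGS == 0) so the
 * last page points back at the first, closing the ring.
 */
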
static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;
	u8 cos;
	int rx_ring_size = 0;

	if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
		rx_ring_size = MIN_RX_SIZE_NONTPA;
		bp->rx_ring_size = rx_ring_size;
	} else if (!bp->rx_ring_size) {
		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);

		if (CHIP_IS_E3(bp)) {
			u32 cfg = SHMEM_RD(bp,
					   dev_info.port_hw_config[BP_PORT(bp)].
					   default_cfg);

			/* Decrease ring size for 1G functions */
			if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
			    PORT_HW_CFG_NET_SERDES_IF_SGMII)
				rx_ring_size /= 10;
		}

		/* allocate at least number of buffers required by FW */
		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
				     MIN_RX_SIZE_TPA, rx_ring_size);

		bp->rx_ring_size = rx_ring_size;
	} else /* if rx_ring_size is specified - use it */
		rx_ring_size = bp->rx_ring_size;

	DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);

	/* Common */
	sb = &bnx2x_fp(bp, index, status_blk);

	if (!IS_FCOE_IDX(index)) {
		/* status blocks */
		if (!CHIP_IS_E1x(bp)) {
			sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
						    sizeof(struct host_hc_status_block_e2));
			if (!sb->e2_sb)
				goto alloc_mem_err;
		} else {
			sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
						     sizeof(struct host_hc_status_block_e1x));
			if (!sb->e1x_sb)
				goto alloc_mem_err;
		}
	}

	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
	 * set shortcuts for it.
	 */
	if (!IS_FCOE_IDX(index))
		set_sb_shortcuts(bp, index);

	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFUP,
			   "allocating tx memory of fp %d cos %d\n",
			   index, cos);

			txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
						      sizeof(struct sw_tx_bd),
						      GFP_KERNEL);
			if (!txdata->tx_buf_ring)
				goto alloc_mem_err;
			txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
							       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
			if (!txdata->tx_desc_ring)
				goto alloc_mem_err;
		}
	}

	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		bnx2x_fp(bp, index, rx_buf_ring) =
			kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_buf_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_desc_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
					sizeof(struct eth_rx_bd) * NUM_RX_BD);
		if (!bnx2x_fp(bp, index, rx_desc_ring))
			goto alloc_mem_err;

		/* Seed all CQEs by 1s */
		bnx2x_fp(bp, index, rx_comp_ring) =
			BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
					 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
		if (!bnx2x_fp(bp, index, rx_comp_ring))
			goto alloc_mem_err;

		/* SGE ring */
		bnx2x_fp(bp, index, rx_page_ring) =
			kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
				GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_page_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_sge_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
					BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
		if (!bnx2x_fp(bp, index, rx_sge_ring))
			goto alloc_mem_err;

		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;

/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if the queue is not big enough; in that
	 * case we disable the queue. The minimum size differs for OOO, TPA
	 * and non-TPA queues.
	 */
	if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
		/* release memory allocated for this queue */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;
	}
	return 0;
}

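/* The alloc_mem_err path above is deliberately "best effort": an
 * allocation shortfall is fatal for the queue only when the ring ends up
 * below the firmware minimum (which differs for TPA and non-TPA queues);
 * otherwise the partially filled ring is kept and used as-is.
 */
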
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{
	if (!NO_FCOE(bp))
		/* FCoE */
		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
			/* we will fail the load process instead of marking
			 * NO_FCOE_FLAG
			 */
			return -ENOMEM;

	return 0;
}

static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/* 1. Allocate FP for leading - fatal if error
	 * 2. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

		WARN_ON(delta < 0);
		bnx2x_shrink_eth_fp(bp, delta);
		if (CNIC_SUPPORT(bp))
			/* move non-eth FPs next to the last eth FP; this
			 * must be done in that order:
			 * FCOE_IDX < FWD_IDX < OOO_IDX
			 */

			/* move the FCoE fp even if NO_FCOE_FLAG is set */
			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
		bp->num_ethernet_queues -= delta;
		bp->num_queues = bp->num_ethernet_queues +
				 bp->num_cnic_queues;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}

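/* Example of the shrink logic (illustrative numbers): with 8 ETH queues
 * configured, a failure at i == 6 gives delta == 2; the two dead
 * fastpaths are trimmed by bnx2x_shrink_eth_fp() and the FCoE fastpath
 * slides down from index 8 to index 6, so the CNIC queues stay adjacent
 * to the last ETH queue.
 */
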
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < bp->fp_array_size; i++)
		kfree(bp->fp[i].tpa_info);
	kfree(bp->fp);
	kfree(bp->sp_objs);
	kfree(bp->fp_stats);
	kfree(bp->bnx2x_txq);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}

int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	/* The biggest MSI-X table we might need is as a maximum number of fast
	 * path IGU SBs plus default SB (for PF only).
	 */
	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);

	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	bp->fp_array_size = fp_array_size;
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);

	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < bp->fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}

	bp->fp = fp;

	/* allocate sp objs */
	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	/* allocate fp_stats */
	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;

	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);

	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;

alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}

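/* Every failure path in bnx2x_alloc_mem_bp() funnels into alloc_err,
 * which delegates cleanup to bnx2x_free_mem_bp(); kfree(NULL) is a
 * no-op, so arrays that were never allocated are skipped harmlessly.
 */
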
int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}

int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;

	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);

	/* The selected activated PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to
	 * reverse the configuration.
	 */
	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}

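/* Example: with PORT_HW_CFG_PHY_SWAPPED_ENABLED set, a link that
 * bnx2x_get_cur_phy_idx() reports on EXT_PHY1 actually belongs to the
 * EXT_PHY2 slot of the persistent configuration, so the index is flipped
 * before LINK_CONFIG_IDX() converts it to a config-array index.
 */
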
#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif

/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
		return -EPERM;
	}

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		BNX2X_ERR("Can't support requested MTU size\n");
		return -EINVAL;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);

	return bnx2x_reload_if_running(dev);
}

netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		netdev_features_t changed = dev->features ^ features;

		/* Revert the requested changes in features if they
		 * would require internal reload of PF in bnx2x_set_features().
		 */
		if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
			features &= ~NETIF_F_RXCSUM;
			features |= dev->features & NETIF_F_RXCSUM;
		}

		if (changed & NETIF_F_LOOPBACK) {
			features &= ~NETIF_F_LOOPBACK;
			features |= dev->features & NETIF_F_LOOPBACK;
		}
	}

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM)) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	return features;
}

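/* Dependency enforced above: hardware LRO/GRO (TPA) relies on Rx
 * checksum offload, so clearing NETIF_F_RXCSUM drops NETIF_F_LRO and
 * NETIF_F_GRO as well. For a PF with active VFs, RXCSUM and LOOPBACK
 * changes are reverted instead, since honouring them would force an
 * internal reload of the PF in bnx2x_set_features().
 */
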
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	netdev_features_t changes = features ^ dev->features;
	bool bnx2x_reload = false;
	int rc;

	/* VFs, or non-SRIOV PFs, should be able to change the loopback feature */
	if (!pci_num_vf(bp->pdev)) {
		if (features & NETIF_F_LOOPBACK) {
			if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
				bp->link_params.loopback_mode = LOOPBACK_BMAC;
				bnx2x_reload = true;
			}
		} else {
			if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
				bp->link_params.loopback_mode = LOOPBACK_NONE;
				bnx2x_reload = true;
			}
		}
	}

	/* if GRO is changed while LRO is enabled, don't force a reload */
	if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
		changes &= ~NETIF_F_GRO;

	/* if GRO is changed while HW TPA is off, don't force a reload */
	if ((changes & NETIF_F_GRO) && bp->disable_tpa)
		changes &= ~NETIF_F_GRO;

	if (changes)
		bnx2x_reload = true;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
			dev->features = features;
			rc = bnx2x_reload_if_running(dev);
			if (rc)
				return rc;
		}
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}

void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}

int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xstorm cxt validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);

	/* clear and set the enable bit */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}

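/* Worked example (illustrative numbers; BNX2X_BTR is the coalescing
 * granularity in usec per tick): usec == 48 with a granularity of 4
 * programs 12 ticks. The disable computation folds in the zero case: an
 * explicit disable request or usec == 0 leaves the index disabled, while
 * any non-zero usec enables it.
 */
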
void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
			    u32 verbose)
{
	smp_mb__before_atomic();
	set_bit(flag, &bp->sp_rtnl_state);
	smp_mb__after_atomic();
	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
	   flag);
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}