1 /* bnx2x_cmn.c: QLogic Everest network driver.
3 * Copyright (c) 2007-2013 Broadcom Corporation
4 * Copyright (c) 2014 QLogic Corporation
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
11 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12 * Written by: Eliezer Tamir
13 * Based on code from Michael Chan's bnx2 driver
14 * UDP CSUM errata workaround by Arik Gendelman
15 * Slowpath and fastpath rework by Vladislav Zolotarov
16 * Statistics and Link management by Yitchak Gertner
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 #include <linux/etherdevice.h>
23 #include <linux/if_vlan.h>
24 #include <linux/interrupt.h>
26 #include <linux/crash_dump.h>
29 #include <net/ip6_checksum.h>
30 #include <linux/prefetch.h>
31 #include "bnx2x_cmn.h"
32 #include "bnx2x_init.h"
35 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
36 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
37 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
38 static int bnx2x_poll(struct napi_struct *napi, int budget);
40 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
44 /* Add NAPI objects */
45 for_each_rx_queue_cnic(bp, i) {
46 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
47 bnx2x_poll, NAPI_POLL_WEIGHT);
51 static void bnx2x_add_all_napi(struct bnx2x *bp)
55 /* Add NAPI objects */
56 for_each_eth_queue(bp, i) {
57 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
58 bnx2x_poll, NAPI_POLL_WEIGHT);
62 static int bnx2x_calc_num_queues(struct bnx2x *bp)
64 int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
66 /* Reduce memory usage in kdump environment by using only one queue */
67 if (is_kdump_kernel())
70 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
75 * bnx2x_move_fp - move content of the fastpath structure.
78 * @from: source FP index
79 * @to: destination FP index
81 * Makes sure the contents of bp->fp[to].napi are kept
82 * intact. This is done by first copying the napi struct from
83 * the target to the source, and then memcpying the entire
84 * source onto the target. Update txdata pointers and related
87 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
89 struct bnx2x_fastpath *from_fp = &bp->fp[from];
90 struct bnx2x_fastpath *to_fp = &bp->fp[to];
91 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
92 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
93 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
94 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
95 int old_max_eth_txqs, new_max_eth_txqs;
96 int old_txdata_index = 0, new_txdata_index = 0;
97 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
99 /* Copy the NAPI object as it has been already initialized */
100 from_fp->napi = to_fp->napi;
102 /* Move bnx2x_fastpath contents */
103 memcpy(to_fp, from_fp, sizeof(*to_fp));
106 /* Retain the tpa_info of the original `to' version as we don't want
107 * 2 FPs to contain the same tpa_info pointer.
109 to_fp->tpa_info = old_tpa_info;
111 /* move sp_objs contents as well, as their indices match fp ones */
112 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
114 /* move fp_stats contents as well, as their indices match fp ones */
115 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
117 /* Update txdata pointers in fp and move txdata content accordingly:
118 * Each fp consumes 'max_cos' txdata structures, so the index should be
119 * decremented by max_cos x delta.
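*
* Illustration (hypothetical values): with 8 ETH queues and
* max_cos == 2, old_max_eth_txqs == 16. Moving the FCoE fp from
* index 8 to index 4 gives new_max_eth_txqs == (8 - 8 + 4) * 2 == 8,
* so its txdata moves from entry 16 + FCOE_TXQ_IDX_OFFSET down to
* entry 8 + FCOE_TXQ_IDX_OFFSET.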
122 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
123 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
125 if (from == FCOE_IDX(bp)) {
126 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
127 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
130 memcpy(&bp->bnx2x_txq[new_txdata_index],
131 &bp->bnx2x_txq[old_txdata_index],
132 sizeof(struct bnx2x_fp_txdata));
133 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
137 * bnx2x_fill_fw_str - Fill buffer with FW version string.
140 * @buf: character buffer to fill with the fw name
141 * @buf_len: length of the above buffer
144 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
147 u8 phy_fw_ver[PHY_FW_VER_LEN];
149 phy_fw_ver[0] = '\0';
150 bnx2x_get_ext_phy_fw_version(&bp->link_params,
151 phy_fw_ver, PHY_FW_VER_LEN);
152 strlcpy(buf, bp->fw_ver, buf_len);
153 snprintf(buf + strlen(bp->fw_ver), buf_len - strlen(bp->fw_ver),
155 (bp->common.bc_ver & 0xff0000) >> 16,
156 (bp->common.bc_ver & 0xff00) >> 8,
157 (bp->common.bc_ver & 0xff),
158 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
160 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
165 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
168 * @delta: number of eth queues which were not allocated
170 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
172 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
174 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
175 * backward along the array could cause memory to be overwritten
177 for (cos = 1; cos < bp->max_cos; cos++) {
178 for (i = 0; i < old_eth_num - delta; i++) {
179 struct bnx2x_fastpath *fp = &bp->fp[i];
180 int new_idx = cos * (old_eth_num - delta) + i;
182 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
183 sizeof(struct bnx2x_fp_txdata));
184 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
189 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
191 /* free skb in the packet ring at pos idx
192 * return idx of last bd freed
194 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
195 u16 idx, unsigned int *pkts_compl,
196 unsigned int *bytes_compl)
198 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
199 struct eth_tx_start_bd *tx_start_bd;
200 struct eth_tx_bd *tx_data_bd;
201 struct sk_buff *skb = tx_buf->skb;
202 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
204 u16 split_bd_len = 0;
206 /* prefetch skb end pointer to speed up dev_kfree_skb() */
209 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
210 txdata->txq_index, idx, tx_buf, skb);
212 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
214 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
215 #ifdef BNX2X_STOP_ON_ERROR
216 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
217 BNX2X_ERR("BAD nbd!\n");
221 new_cons = nbd + tx_buf->first_bd;
223 /* Get the next bd */
224 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
226 /* Skip a parse bd... */
228 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
230 if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
231 /* Skip second parse bd... */
233 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
236 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
237 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
238 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
239 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
241 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
245 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
246 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
252 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
253 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
254 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
256 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
263 (*bytes_compl) += skb->len;
264 dev_kfree_skb_any(skb);
267 tx_buf->first_bd = 0;
273 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
275 struct netdev_queue *txq;
276 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
277 unsigned int pkts_compl = 0, bytes_compl = 0;
279 #ifdef BNX2X_STOP_ON_ERROR
280 if (unlikely(bp->panic))
284 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
285 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
286 sw_cons = txdata->tx_pkt_cons;
288 /* Ensure subsequent loads occur after hw_cons */
291 while (sw_cons != hw_cons) {
294 pkt_cons = TX_BD(sw_cons);
296 DP(NETIF_MSG_TX_DONE,
297 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
298 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
300 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
301 &pkts_compl, &bytes_compl);
306 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
308 txdata->tx_pkt_cons = sw_cons;
309 txdata->tx_bd_cons = bd_cons;
311 /* Need to make the tx_bd_cons update visible to start_xmit()
312 * before checking for netif_tx_queue_stopped(). Without the
313 * memory barrier, there is a small possibility that
314 * start_xmit() will miss it and cause the queue to be stopped
316 * On the other hand we need an rmb() here to ensure the proper
317 * ordering of bit testing in the following
318 * netif_tx_queue_stopped(txq) call.
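*
* Illustrative lost-wakeup (hypothetical interleaving): without the
* barrier, start_xmit() could stop the queue on a stale tx_bd_cons
* just after this side read the queue as not yet stopped - neither
* side would then wake the queue.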
322 if (unlikely(netif_tx_queue_stopped(txq))) {
323 /* Taking tx_lock() is needed to prevent re-enabling the queue
324 * while it's empty. This could have happened if rx_action() gets
325 * suspended in bnx2x_tx_int() after the condition before
326 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
328 * stops the queue->sees fresh tx_bd_cons->releases the queue->
329 * sends some packets consuming the whole queue again->
333 __netif_tx_lock(txq, smp_processor_id());
335 if ((netif_tx_queue_stopped(txq)) &&
336 (bp->state == BNX2X_STATE_OPEN) &&
337 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
338 netif_tx_wake_queue(txq);
340 __netif_tx_unlock(txq);
345 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
348 u16 last_max = fp->last_max_sge;
350 if (SUB_S16(idx, last_max) > 0)
351 fp->last_max_sge = idx;
354 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
356 struct eth_end_agg_rx_cqe *cqe)
358 struct bnx2x *bp = fp->bp;
359 u16 last_max, last_elem, first_elem;
366 /* First mark all used pages */
367 for (i = 0; i < sge_len; i++)
368 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
369 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
371 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
372 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
374 /* Here we assume that the last SGE index is the biggest */
375 prefetch((void *)(fp->sge_mask));
376 bnx2x_update_last_max_sge(fp,
377 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
379 last_max = RX_SGE(fp->last_max_sge);
380 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
381 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
383 /* If ring is not full */
384 if (last_elem + 1 != first_elem)
387 /* Now update the prod */
388 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
389 if (likely(fp->sge_mask[i]))
392 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
393 delta += BIT_VEC64_ELEM_SZ;
397 fp->rx_sge_prod += delta;
398 /* clear page-end entries */
399 bnx2x_clear_sge_mask_next_elems(fp);
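/* Illustration: if the loop above found two fully-consumed 64-bit mask
 * elements (all bits cleared), each adds BIT_VEC64_ELEM_SZ (64) to
 * delta, so rx_sge_prod advances by 128 entries.
 */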
402 DP(NETIF_MSG_RX_STATUS,
403 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
404 fp->last_max_sge, fp->rx_sge_prod);
407 /* Get the Toeplitz hash value for the skb using the value from the
408 * CQE (calculated by HW).
410 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
411 const struct eth_fast_path_rx_cqe *cqe,
412 enum pkt_hash_types *rxhash_type)
414 /* Get Toeplitz hash from CQE */
415 if ((bp->dev->features & NETIF_F_RXHASH) &&
416 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
417 enum eth_rss_hash_type htype;
419 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
420 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
421 (htype == TCP_IPV6_HASH_TYPE)) ?
422 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
424 return le32_to_cpu(cqe->rss_hash_result);
426 *rxhash_type = PKT_HASH_TYPE_NONE;
430 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
432 struct eth_fast_path_rx_cqe *cqe)
434 struct bnx2x *bp = fp->bp;
435 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
436 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
437 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
439 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
440 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
442 /* print error if current state != stop */
443 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
444 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
446 /* Try to map an empty data buffer from the aggregation info */
447 mapping = dma_map_single(&bp->pdev->dev,
448 first_buf->data + NET_SKB_PAD,
449 fp->rx_buf_size, DMA_FROM_DEVICE);
451 * ...if it fails - move the skb from the consumer to the producer
452 * and set the current aggregation state as ERROR to drop it
453 * when TPA_STOP arrives.
456 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
457 /* Move the BD from the consumer to the producer */
458 bnx2x_reuse_rx_data(fp, cons, prod);
459 tpa_info->tpa_state = BNX2X_TPA_ERROR;
463 /* move empty data from pool to prod */
464 prod_rx_buf->data = first_buf->data;
465 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
466 /* point prod_bd to new data */
467 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
468 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
470 /* move partial skb from cons to pool (don't unmap yet) */
471 *first_buf = *cons_rx_buf;
473 /* mark bin state as START */
474 tpa_info->parsing_flags =
475 le16_to_cpu(cqe->pars_flags.flags);
476 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
477 tpa_info->tpa_state = BNX2X_TPA_START;
478 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
479 tpa_info->placement_offset = cqe->placement_offset;
480 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
481 if (fp->mode == TPA_MODE_GRO) {
482 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
483 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
484 tpa_info->gro_size = gro_size;
487 #ifdef BNX2X_STOP_ON_ERROR
488 fp->tpa_queue_used |= (1 << queue);
489 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
494 /* Timestamp option length allowed for TPA aggregation:
496 * nop nop kind length echo val
498 #define TPA_TSTAMP_OPT_LEN 12
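/* Byte accounting for the 12 above (TCP timestamp option padded with
 * two NOPs, per RFC 7323): 1 (nop) + 1 (nop) + 1 (kind) + 1 (length)
 * + 4 (TS value) + 4 (TS echo reply) == 12 bytes.
 */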
500 * bnx2x_set_gro_params - compute GRO values
503 * @parsing_flags: parsing flags from the START CQE
504 * @len_on_bd: total length of the first packet for the
506 * @pkt_len: length of all segments
507 * @num_of_coalesced_segs: count of segments
509 * Approximates the MSS for this aggregation, calculated from
510 * its first packet.
511 * Compute number of aggregated segments, and gso_type.
513 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
514 u16 len_on_bd, unsigned int pkt_len,
515 u16 num_of_coalesced_segs)
517 /* TPA aggregation won't have either IP options or TCP options
518 * other than timestamp or IPv6 extension headers.
520 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
522 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
523 PRS_FLAG_OVERETH_IPV6) {
524 hdrs_len += sizeof(struct ipv6hdr);
525 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
527 hdrs_len += sizeof(struct iphdr);
528 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
531 /* Check if there was a TCP timestamp; if there was, it will
532 * always be 12 bytes long: nop nop kind length echo val.
534 * Otherwise FW would close the aggregation.
536 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
537 hdrs_len += TPA_TSTAMP_OPT_LEN;
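/* Worked example (illustrative): IPv4 with timestamps and a 1514-byte
 * first frame gives hdrs_len == 14 (ETH_HLEN) + 20 (tcphdr) + 20
 * (iphdr) + 12 == 66, so gso_size == 1514 - 66 == 1448 - the usual
 * MSS of a 1500-MTU TCP connection with timestamps enabled.
 */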
539 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
541 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
542 * to skb_shinfo(skb)->gso_segs
544 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
547 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
548 u16 index, gfp_t gfp_mask)
550 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
551 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
552 struct bnx2x_alloc_pool *pool = &fp->page_pool;
556 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
557 if (unlikely(!pool->page))
563 mapping = dma_map_page(&bp->pdev->dev, pool->page,
564 pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
565 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
566 BNX2X_ERR("Can't map sge\n");
570 sw_buf->page = pool->page;
571 sw_buf->offset = pool->offset;
573 dma_unmap_addr_set(sw_buf, mapping, mapping);
575 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
576 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
578 pool->offset += SGE_PAGE_SIZE;
579 if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
580 get_page(pool->page);
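/* Illustration: when PAGE_SIZE exceeds SGE_PAGE_SIZE (e.g. 64K pages
 * with 4K SGEs - a hypothetical configuration), one page backs several
 * SGEs and get_page() above keeps it alive for the next sub-buffer.
 * When they are equal, the whole page is handed to this SGE and no
 * extra reference is taken.
 */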
586 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
587 struct bnx2x_agg_info *tpa_info,
590 struct eth_end_agg_rx_cqe *cqe,
593 struct sw_rx_page *rx_pg, old_rx_pg;
594 u32 i, frag_len, frag_size;
595 int err, j, frag_id = 0;
596 u16 len_on_bd = tpa_info->len_on_bd;
597 u16 full_page = 0, gro_size = 0;
599 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
601 if (fp->mode == TPA_MODE_GRO) {
602 gro_size = tpa_info->gro_size;
603 full_page = tpa_info->full_page;
606 /* This is needed in order to enable forwarding support */
608 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
609 le16_to_cpu(cqe->pkt_len),
610 le16_to_cpu(cqe->num_of_coalesced_segs));
612 #ifdef BNX2X_STOP_ON_ERROR
613 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
614 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
616 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
622 /* Run through the SGL and compose the fragmented skb */
623 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
624 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
626 /* FW gives the indices of the SGE as if the ring is an array
627 (meaning that "next" element will consume 2 indices) */
628 if (fp->mode == TPA_MODE_GRO)
629 frag_len = min_t(u32, frag_size, (u32)full_page);
631 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
633 rx_pg = &fp->rx_page_ring[sge_idx];
636 /* If we fail to allocate a substitute page, we simply stop
637 where we are and drop the whole packet */
638 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
640 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
644 dma_unmap_page(&bp->pdev->dev,
645 dma_unmap_addr(&old_rx_pg, mapping),
646 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
647 /* Add one frag and update the appropriate fields in the skb */
648 if (fp->mode == TPA_MODE_LRO)
649 skb_fill_page_desc(skb, j, old_rx_pg.page,
650 old_rx_pg.offset, frag_len);
654 for (rem = frag_len; rem > 0; rem -= gro_size) {
655 int len = rem > gro_size ? gro_size : rem;
656 skb_fill_page_desc(skb, frag_id++,
658 old_rx_pg.offset + offset,
661 get_page(old_rx_pg.page);
666 skb->data_len += frag_len;
667 skb->truesize += SGE_PAGES;
668 skb->len += frag_len;
670 frag_size -= frag_len;
676 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
678 if (fp->rx_frag_size)
684 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
686 if (fp->rx_frag_size) {
687 /* GFP_KERNEL allocations are used only during initialization */
688 if (unlikely(gfpflags_allow_blocking(gfp_mask)))
689 return (void *)__get_free_page(gfp_mask);
691 return napi_alloc_frag(fp->rx_frag_size);
694 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
698 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
700 const struct iphdr *iph = ip_hdr(skb);
703 skb_set_transport_header(skb, sizeof(struct iphdr));
706 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
707 iph->saddr, iph->daddr, 0);
710 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
712 struct ipv6hdr *iph = ipv6_hdr(skb);
715 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
718 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
719 &iph->saddr, &iph->daddr, 0);
722 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
723 void (*gro_func)(struct bnx2x*, struct sk_buff*))
725 skb_reset_network_header(skb);
727 tcp_gro_complete(skb);
731 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
735 if (skb_shinfo(skb)->gso_size) {
736 switch (be16_to_cpu(skb->protocol)) {
738 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
741 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
744 netdev_WARN_ONCE(bp->dev,
745 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
746 be16_to_cpu(skb->protocol));
750 skb_record_rx_queue(skb, fp->rx_queue);
751 napi_gro_receive(&fp->napi, skb);
754 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
755 struct bnx2x_agg_info *tpa_info,
757 struct eth_end_agg_rx_cqe *cqe,
760 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
761 u8 pad = tpa_info->placement_offset;
762 u16 len = tpa_info->len_on_bd;
763 struct sk_buff *skb = NULL;
764 u8 *new_data, *data = rx_buf->data;
765 u8 old_tpa_state = tpa_info->tpa_state;
767 tpa_info->tpa_state = BNX2X_TPA_STOP;
769 /* If there was an error during the handling of the TPA_START -
770 * drop this aggregation.
772 if (old_tpa_state == BNX2X_TPA_ERROR)
775 /* Try to allocate the new data */
776 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
777 /* Unmap the data in the pool anyway, as we are going to change the
778 pool entry status to BNX2X_TPA_STOP even if the new skb allocation
780 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
781 fp->rx_buf_size, DMA_FROM_DEVICE);
782 if (likely(new_data))
783 skb = build_skb(data, fp->rx_frag_size);
786 #ifdef BNX2X_STOP_ON_ERROR
787 if (pad + len > fp->rx_buf_size) {
788 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
789 pad, len, fp->rx_buf_size);
791 bnx2x_frag_free(fp, new_data);
796 skb_reserve(skb, pad + NET_SKB_PAD);
798 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
800 skb->protocol = eth_type_trans(skb, bp->dev);
801 skb->ip_summed = CHECKSUM_UNNECESSARY;
803 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
804 skb, cqe, cqe_idx)) {
805 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
806 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
807 bnx2x_gro_receive(bp, fp, skb);
809 DP(NETIF_MSG_RX_STATUS,
810 "Failed to allocate new pages - dropping packet!\n");
811 dev_kfree_skb_any(skb);
814 /* put new data in bin */
815 rx_buf->data = new_data;
820 bnx2x_frag_free(fp, new_data);
822 /* drop the packet and keep the buffer in the bin */
823 DP(NETIF_MSG_RX_STATUS,
824 "Failed to allocate or map a new skb - dropping packet!\n");
825 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
828 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
829 u16 index, gfp_t gfp_mask)
832 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
833 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
836 data = bnx2x_frag_alloc(fp, gfp_mask);
837 if (unlikely(data == NULL))
840 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
843 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
844 bnx2x_frag_free(fp, data);
845 BNX2X_ERR("Can't map rx data\n");
850 dma_unmap_addr_set(rx_buf, mapping, mapping);
852 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
853 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
859 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
860 struct bnx2x_fastpath *fp,
861 struct bnx2x_eth_q_stats *qstats)
863 /* Do nothing if no L4 csum validation was done.
864 * We do not check whether IP csum was validated. For IPv4 we assume
865 * that if the card got as far as validating the L4 csum, it also
866 * validated the IP csum. IPv6 has no IP csum.
868 if (cqe->fast_path_cqe.status_flags &
869 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
872 /* If L4 validation was done, check if an error was found. */
874 if (cqe->fast_path_cqe.type_error_flags &
875 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
876 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
877 qstats->hw_csum_err++;
879 skb->ip_summed = CHECKSUM_UNNECESSARY;
882 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
884 struct bnx2x *bp = fp->bp;
885 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
886 u16 sw_comp_cons, sw_comp_prod;
888 union eth_rx_cqe *cqe;
889 struct eth_fast_path_rx_cqe *cqe_fp;
891 #ifdef BNX2X_STOP_ON_ERROR
892 if (unlikely(bp->panic))
898 bd_cons = fp->rx_bd_cons;
899 bd_prod = fp->rx_bd_prod;
900 bd_prod_fw = bd_prod;
901 sw_comp_cons = fp->rx_comp_cons;
902 sw_comp_prod = fp->rx_comp_prod;
904 comp_ring_cons = RCQ_BD(sw_comp_cons);
905 cqe = &fp->rx_comp_ring[comp_ring_cons];
906 cqe_fp = &cqe->fast_path_cqe;
908 DP(NETIF_MSG_RX_STATUS,
909 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
911 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
912 struct sw_rx_bd *rx_buf = NULL;
915 enum eth_rx_cqe_type cqe_fp_type;
919 enum pkt_hash_types rxhash_type;
921 #ifdef BNX2X_STOP_ON_ERROR
922 if (unlikely(bp->panic))
926 bd_prod = RX_BD(bd_prod);
927 bd_cons = RX_BD(bd_cons);
929 /* A rmb() is required to ensure that the CQE is not read
930 * before it is written by the adapter DMA. PCI ordering
931 * rules will make sure the other fields are written before
932 * the marker at the end of struct eth_fast_path_rx_cqe
933 * but without rmb() a weakly ordered processor can process
934 * stale data. Without the barrier TPA state-machine might
935 * enter inconsistent state and kernel stack might be
936 * provided with incorrect packet description - these lead
937 * to various kernel crashes.
941 cqe_fp_flags = cqe_fp->type_error_flags;
942 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
944 DP(NETIF_MSG_RX_STATUS,
945 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
946 CQE_TYPE(cqe_fp_flags),
947 cqe_fp_flags, cqe_fp->status_flags,
948 le32_to_cpu(cqe_fp->rss_hash_result),
949 le16_to_cpu(cqe_fp->vlan_tag),
950 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
952 /* is this a slowpath msg? */
953 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
954 bnx2x_sp_event(fp, cqe);
958 rx_buf = &fp->rx_buf_ring[bd_cons];
961 if (!CQE_TYPE_FAST(cqe_fp_type)) {
962 struct bnx2x_agg_info *tpa_info;
963 u16 frag_size, pages;
964 #ifdef BNX2X_STOP_ON_ERROR
966 if (fp->mode == TPA_MODE_DISABLED &&
967 (CQE_TYPE_START(cqe_fp_type) ||
968 CQE_TYPE_STOP(cqe_fp_type)))
969 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
970 CQE_TYPE(cqe_fp_type));
973 if (CQE_TYPE_START(cqe_fp_type)) {
974 u16 queue = cqe_fp->queue_index;
975 DP(NETIF_MSG_RX_STATUS,
976 "calling tpa_start on queue %d\n",
979 bnx2x_tpa_start(fp, queue,
985 queue = cqe->end_agg_cqe.queue_index;
986 tpa_info = &fp->tpa_info[queue];
987 DP(NETIF_MSG_RX_STATUS,
988 "calling tpa_stop on queue %d\n",
991 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
994 if (fp->mode == TPA_MODE_GRO)
995 pages = (frag_size + tpa_info->full_page - 1) /
998 pages = SGE_PAGE_ALIGN(frag_size) >>
1001 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
1002 &cqe->end_agg_cqe, comp_ring_cons);
1003 #ifdef BNX2X_STOP_ON_ERROR
1008 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1012 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1013 pad = cqe_fp->placement_offset;
1014 dma_sync_single_for_cpu(&bp->pdev->dev,
1015 dma_unmap_addr(rx_buf, mapping),
1016 pad + RX_COPY_THRESH,
1019 prefetch(data + pad); /* speed up eth_type_trans() */
1020 /* is this an error packet? */
1021 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1022 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1023 "ERROR flags %x rx packet %u\n",
1024 cqe_fp_flags, sw_comp_cons);
1025 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1029 /* Since we don't have a jumbo ring,
1030 * copy small packets if mtu > 1500
1032 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1033 (len <= RX_COPY_THRESH)) {
1034 skb = napi_alloc_skb(&fp->napi, len);
1036 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1037 "ERROR packet dropped because of alloc failure\n");
1038 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1041 memcpy(skb->data, data + pad, len);
1042 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1044 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1045 GFP_ATOMIC) == 0)) {
1046 dma_unmap_single(&bp->pdev->dev,
1047 dma_unmap_addr(rx_buf, mapping),
1050 skb = build_skb(data, fp->rx_frag_size);
1051 if (unlikely(!skb)) {
1052 bnx2x_frag_free(fp, data);
1053 bnx2x_fp_qstats(bp, fp)->
1054 rx_skb_alloc_failed++;
1057 skb_reserve(skb, pad);
1059 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1060 "ERROR packet dropped because of alloc failure\n");
1061 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1063 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1069 skb->protocol = eth_type_trans(skb, bp->dev);
1071 /* Set Toeplitz hash for a non-LRO skb */
1072 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1073 skb_set_hash(skb, rxhash, rxhash_type);
1075 skb_checksum_none_assert(skb);
1077 if (bp->dev->features & NETIF_F_RXCSUM)
1078 bnx2x_csum_validate(skb, cqe, fp,
1079 bnx2x_fp_qstats(bp, fp));
1081 skb_record_rx_queue(skb, fp->rx_queue);
1083 /* Check if this packet was timestamped */
1084 if (unlikely(cqe->fast_path_cqe.type_error_flags &
1085 (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1086 bnx2x_set_rx_ts(bp, skb);
1088 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1090 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1091 le16_to_cpu(cqe_fp->vlan_tag));
1093 napi_gro_receive(&fp->napi, skb);
1095 rx_buf->data = NULL;
1097 bd_cons = NEXT_RX_IDX(bd_cons);
1098 bd_prod = NEXT_RX_IDX(bd_prod);
1099 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1102 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1103 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1105 /* mark CQE as free */
1106 BNX2X_SEED_CQE(cqe_fp);
1108 if (rx_pkt == budget)
1111 comp_ring_cons = RCQ_BD(sw_comp_cons);
1112 cqe = &fp->rx_comp_ring[comp_ring_cons];
1113 cqe_fp = &cqe->fast_path_cqe;
1116 fp->rx_bd_cons = bd_cons;
1117 fp->rx_bd_prod = bd_prod_fw;
1118 fp->rx_comp_cons = sw_comp_cons;
1119 fp->rx_comp_prod = sw_comp_prod;
1121 /* Update producers */
1122 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1128 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1130 struct bnx2x_fastpath *fp = fp_cookie;
1131 struct bnx2x *bp = fp->bp;
1135 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1136 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1138 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1140 #ifdef BNX2X_STOP_ON_ERROR
1141 if (unlikely(bp->panic))
1145 /* Handle Rx and Tx according to MSI-X vector */
1146 for_each_cos_in_tx_queue(fp, cos)
1147 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1149 prefetch(&fp->sb_running_index[SM_RX_ID]);
1150 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1155 /* HW Lock for shared dual port PHYs */
1156 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1158 mutex_lock(&bp->port.phy_mutex);
1160 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1163 void bnx2x_release_phy_lock(struct bnx2x *bp)
1165 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1167 mutex_unlock(&bp->port.phy_mutex);
1170 /* calculates MF speed according to current line speed and MF configuration */
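/* Illustration (hypothetical values): in percent-BW mode maxCfg is a
 * percentage, so maxCfg == 25 on a 10000 Mbps link yields
 * (10000 * 25) / 100 == 2500 Mbps. In SD mode maxCfg is in 100 Mbps
 * units, so maxCfg == 50 caps the speed at 50 * 100 == 5000 Mbps.
 */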
1171 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1173 u16 line_speed = bp->link_vars.line_speed;
1175 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1176 bp->mf_config[BP_VN(bp)]);
1178 /* Calculate the current MAX line speed limit for the MF
1181 if (IS_MF_PERCENT_BW(bp))
1182 line_speed = (line_speed * maxCfg) / 100;
1183 else { /* SD mode */
1184 u16 vn_max_rate = maxCfg * 100;
1186 if (vn_max_rate < line_speed)
1187 line_speed = vn_max_rate;
1195 * bnx2x_fill_report_data - fill link report data
1197 * @bp: driver handle
1198 * @data: link state to update
1200 * It uses non-atomic bit operations because it is called under the mutex.
1202 static void bnx2x_fill_report_data(struct bnx2x *bp,
1203 struct bnx2x_link_report_data *data)
1205 memset(data, 0, sizeof(*data));
1208 /* Fill the report data: effective line speed */
1209 data->line_speed = bnx2x_get_mf_speed(bp);
1212 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1213 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1214 &data->link_report_flags);
1216 if (!BNX2X_NUM_ETH_QUEUES(bp))
1217 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1218 &data->link_report_flags);
1221 if (bp->link_vars.duplex == DUPLEX_FULL)
1222 __set_bit(BNX2X_LINK_REPORT_FD,
1223 &data->link_report_flags);
1225 /* Rx Flow Control is ON */
1226 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1227 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1228 &data->link_report_flags);
1230 /* Tx Flow Control is ON */
1231 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1232 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1233 &data->link_report_flags);
1235 *data = bp->vf_link_vars;
1240 * bnx2x_link_report - report link status to OS.
1242 * @bp: driver handle
1244 * Calls the __bnx2x_link_report() under the same locking scheme
1245 * as the link/PHY state managing code to ensure a consistent link
1249 void bnx2x_link_report(struct bnx2x *bp)
1251 bnx2x_acquire_phy_lock(bp);
1252 __bnx2x_link_report(bp);
1253 bnx2x_release_phy_lock(bp);
1257 * __bnx2x_link_report - report link status to OS.
1259 * @bp: driver handle
1261 * Non-atomic implementation.
1262 * Should be called under the phy_lock.
1264 void __bnx2x_link_report(struct bnx2x *bp)
1266 struct bnx2x_link_report_data cur_data;
1268 if (bp->force_link_down) {
1269 bp->link_vars.link_up = 0;
1274 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1275 bnx2x_read_mf_cfg(bp);
1277 /* Read the current link report info */
1278 bnx2x_fill_report_data(bp, &cur_data);
1280 /* Don't report link down or exactly the same link status twice */
1281 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1282 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1283 &bp->last_reported_link.link_report_flags) &&
1284 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1285 &cur_data.link_report_flags)))
1290 /* We are going to report new link parameters now -
1291 * remember the current data for the next time.
1293 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1295 /* propagate status to VFs */
1297 bnx2x_iov_link_update(bp);
1299 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1300 &cur_data.link_report_flags)) {
1301 netif_carrier_off(bp->dev);
1302 netdev_err(bp->dev, "NIC Link is Down\n");
1308 netif_carrier_on(bp->dev);
1310 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1311 &cur_data.link_report_flags))
1316 /* Handle the FC at the end so that only these flags could
1317 * possibly be set. This way we may easily check if there is no FC
1320 if (cur_data.link_report_flags) {
1321 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1322 &cur_data.link_report_flags)) {
1323 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1324 &cur_data.link_report_flags))
1325 flow = "ON - receive & transmit";
1327 flow = "ON - receive";
1329 flow = "ON - transmit";
1334 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1335 cur_data.line_speed, duplex, flow);
1339 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1343 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1344 struct eth_rx_sge *sge;
1346 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1348 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1349 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1352 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1353 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1357 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1358 struct bnx2x_fastpath *fp, int last)
1362 for (i = 0; i < last; i++) {
1363 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1364 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1365 u8 *data = first_buf->data;
1368 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1371 if (tpa_info->tpa_state == BNX2X_TPA_START)
1372 dma_unmap_single(&bp->pdev->dev,
1373 dma_unmap_addr(first_buf, mapping),
1374 fp->rx_buf_size, DMA_FROM_DEVICE);
1375 bnx2x_frag_free(fp, data);
1376 first_buf->data = NULL;
1380 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1384 for_each_rx_queue_cnic(bp, j) {
1385 struct bnx2x_fastpath *fp = &bp->fp[j];
1389 /* Activate BD ring */
1391 * this will generate an interrupt (to the TSTORM)
1392 * must only be done after chip is initialized
1394 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1399 void bnx2x_init_rx_rings(struct bnx2x *bp)
1401 int func = BP_FUNC(bp);
1405 /* Allocate TPA resources */
1406 for_each_eth_queue(bp, j) {
1407 struct bnx2x_fastpath *fp = &bp->fp[j];
1410 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1412 if (fp->mode != TPA_MODE_DISABLED) {
1413 /* Fill the per-aggregation pool */
1414 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1415 struct bnx2x_agg_info *tpa_info =
1417 struct sw_rx_bd *first_buf =
1418 &tpa_info->first_buf;
1421 bnx2x_frag_alloc(fp, GFP_KERNEL);
1422 if (!first_buf->data) {
1423 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1425 bnx2x_free_tpa_pool(bp, fp, i);
1426 fp->mode = TPA_MODE_DISABLED;
1429 dma_unmap_addr_set(first_buf, mapping, 0);
1430 tpa_info->tpa_state = BNX2X_TPA_STOP;
1433 /* "next page" elements initialization */
1434 bnx2x_set_next_page_sgl(fp);
1436 /* set SGEs bit mask */
1437 bnx2x_init_sge_ring_bit_mask(fp);
1439 /* Allocate SGEs and initialize the ring elements */
1440 for (i = 0, ring_prod = 0;
1441 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1443 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1445 BNX2X_ERR("was only able to allocate %d rx sges\n",
1447 BNX2X_ERR("disabling TPA for queue[%d]\n",
1449 /* Cleanup already allocated elements */
1450 bnx2x_free_rx_sge_range(bp, fp,
1452 bnx2x_free_tpa_pool(bp, fp,
1454 fp->mode = TPA_MODE_DISABLED;
1458 ring_prod = NEXT_SGE_IDX(ring_prod);
1461 fp->rx_sge_prod = ring_prod;
1465 for_each_eth_queue(bp, j) {
1466 struct bnx2x_fastpath *fp = &bp->fp[j];
1470 /* Activate BD ring */
1472 * this will generate an interrupt (to the TSTORM)
1473 * must only be done after chip is initialized
1475 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1481 if (CHIP_IS_E1(bp)) {
1482 REG_WR(bp, BAR_USTRORM_INTMEM +
1483 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1484 U64_LO(fp->rx_comp_mapping));
1485 REG_WR(bp, BAR_USTRORM_INTMEM +
1486 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1487 U64_HI(fp->rx_comp_mapping));
1492 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1495 struct bnx2x *bp = fp->bp;
1497 for_each_cos_in_tx_queue(fp, cos) {
1498 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1499 unsigned pkts_compl = 0, bytes_compl = 0;
1501 u16 sw_prod = txdata->tx_pkt_prod;
1502 u16 sw_cons = txdata->tx_pkt_cons;
1504 while (sw_cons != sw_prod) {
1505 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1506 &pkts_compl, &bytes_compl);
1510 netdev_tx_reset_queue(
1511 netdev_get_tx_queue(bp->dev,
1512 txdata->txq_index));
1516 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1520 for_each_tx_queue_cnic(bp, i) {
1521 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1525 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1529 for_each_eth_queue(bp, i) {
1530 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1534 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1536 struct bnx2x *bp = fp->bp;
1539 /* ring wasn't allocated */
1540 if (fp->rx_buf_ring == NULL)
1543 for (i = 0; i < NUM_RX_BD; i++) {
1544 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1545 u8 *data = rx_buf->data;
1549 dma_unmap_single(&bp->pdev->dev,
1550 dma_unmap_addr(rx_buf, mapping),
1551 fp->rx_buf_size, DMA_FROM_DEVICE);
1553 rx_buf->data = NULL;
1554 bnx2x_frag_free(fp, data);
1558 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1562 for_each_rx_queue_cnic(bp, j) {
1563 bnx2x_free_rx_bds(&bp->fp[j]);
1567 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1571 for_each_eth_queue(bp, j) {
1572 struct bnx2x_fastpath *fp = &bp->fp[j];
1574 bnx2x_free_rx_bds(fp);
1576 if (fp->mode != TPA_MODE_DISABLED)
1577 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1581 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1583 bnx2x_free_tx_skbs_cnic(bp);
1584 bnx2x_free_rx_skbs_cnic(bp);
1587 void bnx2x_free_skbs(struct bnx2x *bp)
1589 bnx2x_free_tx_skbs(bp);
1590 bnx2x_free_rx_skbs(bp);
1593 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1595 /* load old values */
1596 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1598 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1599 /* leave all but MAX value */
1600 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1602 /* set new MAX value */
1603 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1604 & FUNC_MF_CFG_MAX_BW_MASK;
1606 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1611 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1613 * @bp: driver handle
1614 * @nvecs: number of vectors to be released
1616 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1620 if (nvecs == offset)
1623 /* VFs don't have a default SB */
1625 free_irq(bp->msix_table[offset].vector, bp->dev);
1626 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1627 bp->msix_table[offset].vector);
1631 if (CNIC_SUPPORT(bp)) {
1632 if (nvecs == offset)
1637 for_each_eth_queue(bp, i) {
1638 if (nvecs == offset)
1640 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1641 i, bp->msix_table[offset].vector);
1643 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1647 void bnx2x_free_irq(struct bnx2x *bp)
1649 if (bp->flags & USING_MSIX_FLAG &&
1650 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1651 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1653 /* vfs don't have a default status block */
1657 bnx2x_free_msix_irqs(bp, nvecs);
1659 free_irq(bp->dev->irq, bp->dev);
1663 int bnx2x_enable_msix(struct bnx2x *bp)
1665 int msix_vec = 0, i, rc;
1667 /* VFs don't have a default status block */
1669 bp->msix_table[msix_vec].entry = msix_vec;
1670 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1671 bp->msix_table[0].entry);
1675 /* Cnic requires an msix vector for itself */
1676 if (CNIC_SUPPORT(bp)) {
1677 bp->msix_table[msix_vec].entry = msix_vec;
1678 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1679 msix_vec, bp->msix_table[msix_vec].entry);
1683 /* We need separate vectors for ETH queues only (not FCoE) */
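/* Resulting msix_table layout (illustrative: PF with CNIC support and
 * 4 ETH queues): entry 0 - slowpath/default SB, entry 1 - CNIC,
 * entries 2..5 - one per ETH fastpath. A VF skips entry 0, as it has
 * no default status block.
 */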
1684 for_each_eth_queue(bp, i) {
1685 bp->msix_table[msix_vec].entry = msix_vec;
1686 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1687 msix_vec, msix_vec, i);
1691 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1694 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1695 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1697 * reconfigure number of tx/rx queues according to available
1700 if (rc == -ENOSPC) {
1701 /* Get by with single vector */
1702 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1704 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1709 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1710 bp->flags |= USING_SINGLE_MSIX_FLAG;
1712 BNX2X_DEV_INFO("set number of queues to 1\n");
1713 bp->num_ethernet_queues = 1;
1714 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1715 } else if (rc < 0) {
1716 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1718 } else if (rc < msix_vec) {
1719 /* how many fewer vectors will we have? */
1720 int diff = msix_vec - rc;
1722 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1725 * decrease number of queues by number of unallocated entries
1727 bp->num_ethernet_queues -= diff;
1728 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1730 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1734 bp->flags |= USING_MSIX_FLAG;
1739 /* fall back to INTx if not enough memory */
1741 bp->flags |= DISABLE_MSI_FLAG;
1746 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1748 int i, rc, offset = 0;
1750 /* no default status block for vf */
1752 rc = request_irq(bp->msix_table[offset++].vector,
1753 bnx2x_msix_sp_int, 0,
1754 bp->dev->name, bp->dev);
1756 BNX2X_ERR("request sp irq failed\n");
1761 if (CNIC_SUPPORT(bp))
1764 for_each_eth_queue(bp, i) {
1765 struct bnx2x_fastpath *fp = &bp->fp[i];
1766 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1769 rc = request_irq(bp->msix_table[offset].vector,
1770 bnx2x_msix_fp_int, 0, fp->name, fp);
1772 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1773 bp->msix_table[offset].vector, rc);
1774 bnx2x_free_msix_irqs(bp, offset);
1781 i = BNX2X_NUM_ETH_QUEUES(bp);
1783 offset = 1 + CNIC_SUPPORT(bp);
1784 netdev_info(bp->dev,
1785 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1786 bp->msix_table[0].vector,
1787 0, bp->msix_table[offset].vector,
1788 i - 1, bp->msix_table[offset + i - 1].vector);
1790 offset = CNIC_SUPPORT(bp);
1791 netdev_info(bp->dev,
1792 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1793 0, bp->msix_table[offset].vector,
1794 i - 1, bp->msix_table[offset + i - 1].vector);
1799 int bnx2x_enable_msi(struct bnx2x *bp)
1803 rc = pci_enable_msi(bp->pdev);
1805 BNX2X_DEV_INFO("MSI is not attainable\n");
1808 bp->flags |= USING_MSI_FLAG;
1813 static int bnx2x_req_irq(struct bnx2x *bp)
1815 unsigned long flags;
1818 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1821 flags = IRQF_SHARED;
1823 if (bp->flags & USING_MSIX_FLAG)
1824 irq = bp->msix_table[0].vector;
1826 irq = bp->pdev->irq;
1828 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1831 static int bnx2x_setup_irqs(struct bnx2x *bp)
1834 if (bp->flags & USING_MSIX_FLAG &&
1835 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1836 rc = bnx2x_req_msix_irqs(bp);
1840 rc = bnx2x_req_irq(bp);
1842 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1845 if (bp->flags & USING_MSI_FLAG) {
1846 bp->dev->irq = bp->pdev->irq;
1847 netdev_info(bp->dev, "using MSI IRQ %d\n",
1850 if (bp->flags & USING_MSIX_FLAG) {
1851 bp->dev->irq = bp->msix_table[0].vector;
1852 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1860 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1864 for_each_rx_queue_cnic(bp, i) {
1865 napi_enable(&bnx2x_fp(bp, i, napi));
1869 static void bnx2x_napi_enable(struct bnx2x *bp)
1873 for_each_eth_queue(bp, i) {
1874 napi_enable(&bnx2x_fp(bp, i, napi));
1878 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1882 for_each_rx_queue_cnic(bp, i) {
1883 napi_disable(&bnx2x_fp(bp, i, napi));
1887 static void bnx2x_napi_disable(struct bnx2x *bp)
1891 for_each_eth_queue(bp, i) {
1892 napi_disable(&bnx2x_fp(bp, i, napi));
1896 void bnx2x_netif_start(struct bnx2x *bp)
1898 if (netif_running(bp->dev)) {
1899 bnx2x_napi_enable(bp);
1900 if (CNIC_LOADED(bp))
1901 bnx2x_napi_enable_cnic(bp);
1902 bnx2x_int_enable(bp);
1903 if (bp->state == BNX2X_STATE_OPEN)
1904 netif_tx_wake_all_queues(bp->dev);
1908 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1910 bnx2x_int_disable_sync(bp, disable_hw);
1911 bnx2x_napi_disable(bp);
1912 if (CNIC_LOADED(bp))
1913 bnx2x_napi_disable_cnic(bp);
1916 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1917 struct net_device *sb_dev)
1919 struct bnx2x *bp = netdev_priv(dev);
1921 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1922 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1923 u16 ether_type = ntohs(hdr->h_proto);
1925 /* Skip VLAN tag if present */
1926 if (ether_type == ETH_P_8021Q) {
1927 struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
1929 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1932 /* If ethertype is FCoE or FIP - use FCoE ring */
1933 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1934 return bnx2x_fcoe_tx(bp, txq_index);
1937 /* select a non-FCoE queue */
1938 return netdev_pick_tx(dev, skb, NULL) %
1939 (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
1942 void bnx2x_set_num_queues(struct bnx2x *bp)
1945 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1947 /* override in STORAGE SD modes */
1948 if (IS_MF_STORAGE_ONLY(bp))
1949 bp->num_ethernet_queues = 1;
1951 /* Add special queues */
1952 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1953 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1955 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1959 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1961 * @bp: Driver handle
1962 * @include_cnic: handle cnic case
1964 * We currently support at most 16 Tx queues for each CoS, thus we will
1965 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1968 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1969 * index after all ETH L2 indices.
1971 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1972 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1973 * 16..31,...) with indices that are not coupled with any real Tx queue.
1975 * The proper configuration of skb->queue_mapping is handled by
1976 * bnx2x_select_queue() and __skb_tx_hash().
1978 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1979 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
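*
* Illustration of the scheme above (hypothetical sizes): with 6 ETH
* queues and max_cos == 3, the CoS groups would occupy indices 0..5,
* 16..21 and 32..37, leaving 6..15, 22..31 and 38..47 as holes.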
1981 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1985 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1986 rx = BNX2X_NUM_ETH_QUEUES(bp);
1988 /* account for fcoe queue */
1989 if (include_cnic && !NO_FCOE(bp)) {
1994 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1996 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1999 rc = netif_set_real_num_rx_queues(bp->dev, rx);
2001 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2005 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2011 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2015 for_each_queue(bp, i) {
2016 struct bnx2x_fastpath *fp = &bp->fp[i];
2019 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2022 * Although there are no IP frames expected to arrive on
2023 * this ring we still want to add an
2024 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2027 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2030 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2031 IP_HEADER_ALIGNMENT_PADDING +
2034 BNX2X_FW_RX_ALIGN_END;
2035 fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
2036 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
2037 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2038 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2040 fp->rx_frag_size = 0;
2044 static int bnx2x_init_rss(struct bnx2x *bp)
2047 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2049 /* Prepare the initial contents for the indirection table if RSS is
2052 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2053 bp->rss_conf_obj.ind_table[i] =
2055 ethtool_rxfh_indir_default(i, num_eth_queues);
2058 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2059 * per-port, so if explicit configuration is needed, do it only
2062 * For 57712 and newer on the other hand it's a per-function
2065 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2068 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2069 bool config_hash, bool enable)
2071 struct bnx2x_config_rss_params params = {NULL};
2073 /* Although RSS is meaningless when there is a single HW queue we
2074 * still need it enabled in order to have HW Rx hash generated.
2076 * if (!is_eth_multi(bp))
2077 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2080 params.rss_obj = rss_obj;
2082 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
2085 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
2087 /* RSS configuration */
2088 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
2089 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
2090 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
2091 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
2092 if (rss_obj->udp_rss_v4)
2093 __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
2094 if (rss_obj->udp_rss_v6)
2095 __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
2097 if (!CHIP_IS_E1x(bp)) {
2098 /* valid only for TUNN_MODE_VXLAN tunnel mode */
2099 __set_bit(BNX2X_RSS_IPV4_VXLAN, ¶ms.rss_flags);
2100 __set_bit(BNX2X_RSS_IPV6_VXLAN, ¶ms.rss_flags);
2102 /* valid only for TUNN_MODE_GRE tunnel mode */
2103 __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, ¶ms.rss_flags);
2106 __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags);
2110 params.rss_result_mask = MULTI_MASK;
2112 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2116 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2117 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
2121 return bnx2x_config_rss(bp, ¶ms);
2123 return bnx2x_vfpf_config_rss(bp, ¶ms);
2126 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2128 struct bnx2x_func_state_params func_params = {NULL};
2130 /* Prepare parameters for function state transitions */
2131 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2133 func_params.f_obj = &bp->func_obj;
2134 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2136 func_params.params.hw_init.load_phase = load_code;
2138 return bnx2x_func_state_change(bp, &func_params);
2142 * Cleans the objects that have internal lists without sending
2143 * ramrods. Should be run when interrupts are disabled.
2145 void bnx2x_squeeze_objects(struct bnx2x *bp)
2148 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2149 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2150 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2152 /***************** Cleanup MACs' object first *************************/
2154 /* Wait for completion of requested commands */
2155 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2156 /* Perform a dry cleanup */
2157 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2159 /* Clean ETH primary MAC */
2160 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2161 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2164 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2166 /* Cleanup UC list */
2168 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2169 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2172 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2174 /***************** Now clean mcast object *****************************/
2175 rparam.mcast_obj = &bp->mcast_obj;
2176 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2178 /* Add a DEL command... - Since we're doing a driver cleanup only,
2179 * we take a lock surrounding both the initial send and the CONTs,
2180 * as we don't want a true completion to disrupt us in the middle.
2182 netif_addr_lock_bh(bp->dev);
2183 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2185 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2188 /* ...and wait until all pending commands are cleared */
2189 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2192 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2194 netif_addr_unlock_bh(bp->dev);
2198 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2200 netif_addr_unlock_bh(bp->dev);
2203 #ifndef BNX2X_STOP_ON_ERROR
2204 #define LOAD_ERROR_EXIT(bp, label) \
2206 (bp)->state = BNX2X_STATE_ERROR; \
2210 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2212 bp->cnic_loaded = false; \
2215 #else /*BNX2X_STOP_ON_ERROR*/
2216 #define LOAD_ERROR_EXIT(bp, label) \
2218 (bp)->state = BNX2X_STATE_ERROR; \
2222 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2224 bp->cnic_loaded = false; \
2228 #endif /*BNX2X_STOP_ON_ERROR*/
2230 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2232 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2233 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2237 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2239 int num_groups, vf_headroom = 0;
2240 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2242 /* number of queues for statistics is number of eth queues + FCoE */
2243 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2245 /* Total number of FW statistics requests =
2246 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2247 * and fcoe l2 queue) stats + num of queues (which includes another 1
2248 * for fcoe l2 queue if applicable)
2250 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2252 /* vf stats appear in the request list, but their data is allocated by
2253 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2254 * it is used to determine where to place the vf stats queries in the
2258 vf_headroom = bnx2x_vf_headroom(bp);
2260 /* Request is built from stats_query_header and an array of
2261 * stats_query_cmd_group each of which contains
2262 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2263 * configured in the stats_query_header.
2266 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2267 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2270 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2271 bp->fw_stats_num, vf_headroom, num_groups);
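/* e.g. (hypothetical counts): with fw_stats_num + vf_headroom == 20
 * and STATS_QUERY_CMD_COUNT == 16, the expression above yields
 * 20 / 16 + 1 == 2 groups - a plain ceiling division.
 */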
2272 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2273 num_groups * sizeof(struct stats_query_cmd_group);
2275 /* Data for statistics requests + stats_counter
2276 * stats_counter holds per-STORM counters that are incremented
2277 * when STORM has finished with the current request.
2278 * memory for FCoE offloaded statistics is counted anyway,
2279 * even if they will not be sent.
2280 * VF stats are not accounted for here as the data of VF stats is stored
2281 * in memory allocated by the VF, not here.
2283 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2284 sizeof(struct per_pf_stats) +
2285 sizeof(struct fcoe_statistics_params) +
2286 sizeof(struct per_queue_stats) * num_queue_stats +
2287 sizeof(struct stats_counter);
2289 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2290 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2291 if (!bp->fw_stats)
2292 goto alloc_mem_err;
2294 /* Set shortcuts */
2295 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2296 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2297 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2298 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2299 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2300 bp->fw_stats_req_sz;
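/* The request and data regions share one DMA-coherent block:
 *
 *   fw_stats_mapping --> [ fw_stats_req | fw_stats_data ]
 *
 * which is why the data pointer and its bus address are derived by simply
 * offsetting the request base by fw_stats_req_sz.
 */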
2302 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2303 U64_HI(bp->fw_stats_req_mapping),
2304 U64_LO(bp->fw_stats_req_mapping));
2305 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2306 U64_HI(bp->fw_stats_data_mapping),
2307 U64_LO(bp->fw_stats_data_mapping));
2308 return 0;
2310 alloc_mem_err:
2311 bnx2x_free_fw_stats_mem(bp);
2312 BNX2X_ERR("Can't allocate FW stats memory\n");
2313 return -ENOMEM;
2314 }
2316 /* send load request to mcp and analyze response */
2317 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2318 {
2319 u32 param;
2321 /* init fw_seq */
2322 bp->fw_seq =
2323 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2324 DRV_MSG_SEQ_NUMBER_MASK);
2325 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2327 /* Get current FW pulse sequence */
2328 bp->fw_drv_pulse_wr_seq =
2329 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2330 DRV_PULSE_SEQ_MASK);
2331 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2333 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2335 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2336 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2339 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2341 /* if mcp fails to respond we must abort */
2342 if (!(*load_code)) {
2343 BNX2X_ERR("MCP response failure, aborting\n");
2347 /* If mcp refused (e.g. other port is in diagnostic mode) we
2350 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2351 BNX2X_ERR("MCP refused load request, aborting\n");
2357 /* check whether another PF has already loaded FW to chip. In
2358 * virtualized environments a pf from another VM may have already
2359 * initialized the device including loading FW
2360 */
2361 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2362 {
2363 /* is another pf loaded on this engine? */
2364 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2365 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2366 u8 loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng;
2367 u32 loaded_fw;
2369 /* read loaded FW from chip */
2370 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2372 loaded_fw_major = loaded_fw & 0xff;
2373 loaded_fw_minor = (loaded_fw >> 8) & 0xff;
2374 loaded_fw_rev = (loaded_fw >> 16) & 0xff;
2375 loaded_fw_eng = (loaded_fw >> 24) & 0xff;
2377 DP(BNX2X_MSG_SP, "loaded fw 0x%x major 0x%x minor 0x%x rev 0x%x eng 0x%x\n",
2378 loaded_fw, loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng);
2380 /* abort nic load if version mismatch */
2381 if (loaded_fw_major != BCM_5710_FW_MAJOR_VERSION ||
2382 loaded_fw_minor != BCM_5710_FW_MINOR_VERSION || loaded_fw_rev != BCM_5710_FW_REVISION_VERSION ||
2383 loaded_fw_eng != BCM_5710_FW_ENGINEERING_VERSION) {
2384 if (print_err)
2385 BNX2X_ERR("loaded FW incompatible. Aborting\n");
2386 else
2387 BNX2X_DEV_INFO("loaded FW incompatible, possibly due to MF UNDI\n");
2388 return -EBUSY;
2389 }
2390 }
2391 return 0;
2392 }
2393 /* returns the "mcp load_code" according to global load_count array */
2394 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2396 int path = BP_PATH(bp);
2398 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2399 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2400 bnx2x_load_count[path][2]);
2401 bnx2x_load_count[path][0]++;
2402 bnx2x_load_count[path][1 + port]++;
2403 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2404 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2405 bnx2x_load_count[path][2]);
2406 if (bnx2x_load_count[path][0] == 1)
2407 return FW_MSG_CODE_DRV_LOAD_COMMON;
2408 else if (bnx2x_load_count[path][1 + port] == 1)
2409 return FW_MSG_CODE_DRV_LOAD_PORT;
2411 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2412 }
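/* Summary of the load-count bookkeeping above: bnx2x_load_count[path][0]
 * counts all functions loaded on the path, [1] and [2] count per-port loads.
 * The first function up on a path initializes common blocks (LOAD_COMMON),
 * the first on a port gets LOAD_PORT, everyone else only initializes
 * per-function resources (LOAD_FUNCTION).
 */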
2414 /* mark PMF if applicable */
2415 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2417 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2418 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2419 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2420 bp->port.pmf = 1;
2421 /* We need the barrier to ensure the ordering between the
2422 * writing to bp->port.pmf here and reading it from the
2423 * bnx2x_periodic_task().
2424 */
2425 smp_mb();
2426 } else {
2427 bp->port.pmf = 0;
2428 }
2430 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2431 }
2433 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2435 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2436 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2437 (bp->common.shmem2_base)) {
2438 if (SHMEM2_HAS(bp, dcc_support))
2439 SHMEM2_WR(bp, dcc_support,
2440 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2441 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2442 if (SHMEM2_HAS(bp, afex_driver_support))
2443 SHMEM2_WR(bp, afex_driver_support,
2444 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2445 }
2447 /* Set AFEX default VLAN tag to an invalid value */
2448 bp->afex_def_vlan_tag = -1;
2449 }
2451 /**
2452 * bnx2x_bz_fp - zero content of the fastpath structure.
2454 * @bp: driver handle
2455 * @index: fastpath index to be zeroed
2457 * Makes sure the contents of the bp->fp[index].napi is kept
2458 * intact.
2459 */
2460 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2461 {
2462 struct bnx2x_fastpath *fp = &bp->fp[index];
2464 struct napi_struct orig_napi = fp->napi;
2465 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2467 /* bzero bnx2x_fastpath contents */
2468 if (fp->tpa_info)
2469 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2470 sizeof(struct bnx2x_agg_info));
2471 memset(fp, 0, sizeof(*fp));
2473 /* Restore the NAPI object as it has been already initialized */
2474 fp->napi = orig_napi;
2475 fp->tpa_info = orig_tpa_info;
2476 fp->bp = bp;
2477 fp->index = index;
2478 if (IS_ETH_FP(fp))
2479 fp->max_cos = bp->max_cos;
2480 else
2481 /* Special queues support only one CoS */
2482 fp->max_cos = 1;
2484 /* Init txdata pointers */
2485 if (IS_FCOE_FP(fp))
2486 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2487 if (IS_ETH_FP(fp))
2488 for_each_cos_in_tx_queue(fp, cos)
2489 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2490 BNX2X_NUM_ETH_QUEUES(bp) + index];
2492 /* set the tpa flag for each queue. The tpa flag determines the queue
2493 * minimal size so it must be set prior to queue memory allocation
2494 */
2495 if (bp->dev->features & NETIF_F_LRO)
2496 fp->mode = TPA_MODE_LRO;
2497 else if (bp->dev->features & NETIF_F_GRO_HW)
2498 fp->mode = TPA_MODE_GRO;
2499 else
2500 fp->mode = TPA_MODE_DISABLED;
2502 /* We don't want TPA if it's disabled in bp
2503 * or if this is an FCoE L2 ring.
2504 */
2505 if (bp->disable_tpa || IS_FCOE_FP(fp))
2506 fp->mode = TPA_MODE_DISABLED;
2507 }
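/* Net effect of the selection above: TPA mode follows the netdev features,
 * with LRO taking precedence over HW GRO, but the driver-level disable_tpa
 * knob and FCoE rings always win and force TPA_MODE_DISABLED.
 */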
2509 void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2510 {
2511 u32 cur;
2513 if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2514 return;
2516 cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2517 DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2518 cur, state);
2520 SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2521 }
2523 int bnx2x_load_cnic(struct bnx2x *bp)
2524 {
2525 int i, rc, port = BP_PORT(bp);
2527 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2529 mutex_init(&bp->cnic_mutex);
2531 if (IS_PF(bp)) {
2532 rc = bnx2x_alloc_mem_cnic(bp);
2533 if (rc) {
2534 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2535 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2536 }
2537 }
2539 rc = bnx2x_alloc_fp_mem_cnic(bp);
2540 if (rc) {
2541 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2542 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2543 }
2545 /* Update the number of queues with the cnic queues */
2546 rc = bnx2x_set_real_num_queues(bp, 1);
2547 if (rc) {
2548 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2549 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2550 }
2552 /* Add all CNIC NAPI objects */
2553 bnx2x_add_all_napi_cnic(bp);
2554 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2555 bnx2x_napi_enable_cnic(bp);
2557 rc = bnx2x_init_hw_func_cnic(bp);
2558 if (rc)
2559 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2561 bnx2x_nic_init_cnic(bp);
2563 if (IS_PF(bp)) {
2564 /* Enable Timer scan */
2565 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2567 /* setup cnic queues */
2568 for_each_cnic_queue(bp, i) {
2569 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2570 if (rc) {
2571 BNX2X_ERR("Queue setup failed\n");
2572 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2573 }
2574 }
2575 }
2577 /* Initialize Rx filter. */
2578 bnx2x_set_rx_mode_inner(bp);
2580 /* re-read iscsi info */
2581 bnx2x_get_iscsi_info(bp);
2582 bnx2x_setup_cnic_irq_info(bp);
2583 bnx2x_setup_cnic_info(bp);
2584 bp->cnic_loaded = true;
2585 if (bp->state == BNX2X_STATE_OPEN)
2586 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2588 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2592 #ifndef BNX2X_STOP_ON_ERROR
2593 load_error_cnic2:
2594 /* Disable Timer scan */
2595 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2597 load_error_cnic1:
2598 bnx2x_napi_disable_cnic(bp);
2599 /* Update the number of queues without the cnic queues */
2600 if (bnx2x_set_real_num_queues(bp, 0))
2601 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2603 BNX2X_ERR("CNIC-related load failed\n");
2604 bnx2x_free_fp_mem_cnic(bp);
2605 bnx2x_free_mem_cnic(bp);
2607 #endif /* ! BNX2X_STOP_ON_ERROR */
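/* High-level flow of bnx2x_nic_load() below: allocate bp/fastpath/FW-stats
 * memory, negotiate the load with the MCP (or emulate the handshake when no
 * management FW is present), init HW and connect IRQs, bring up the leading
 * queue plus the remaining RSS queues, program the unicast MAC, start the
 * PHY and finally enable the Tx queues and periodic timer. Failures unwind
 * through the load_error* labels in reverse order of setup.
 */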
2610 /* must be called with rtnl_lock */
2611 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2612 {
2613 int port = BP_PORT(bp);
2614 int i, rc = 0, load_code = 0;
2616 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2618 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2620 #ifdef BNX2X_STOP_ON_ERROR
2621 if (unlikely(bp->panic)) {
2622 BNX2X_ERR("Can't load NIC when there is panic\n");
2627 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2629 /* zero the structure w/o any lock, before SP handler is initialized */
2630 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2631 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2632 &bp->last_reported_link.link_report_flags);
2635 /* must be called before memory allocation and HW init */
2636 bnx2x_ilt_set_info(bp);
2638 /*
2639 * Zero fastpath structures preserving invariants like napi, which are
2640 * allocated only once, fp index, max_cos, bp pointer.
2641 * Also set fp->mode and txdata_ptr.
2642 */
2643 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2644 for_each_queue(bp, i)
2646 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2647 bp->num_cnic_queues) *
2648 sizeof(struct bnx2x_fp_txdata));
2650 bp->fcoe_init = false;
2652 /* Set the receive queues buffer size */
2653 bnx2x_set_rx_buf_size(bp);
2655 if (IS_PF(bp)) {
2656 rc = bnx2x_alloc_mem(bp);
2657 if (rc) {
2658 BNX2X_ERR("Unable to allocate bp memory\n");
2659 return -ENOMEM;
2660 }
2661 }
2663 /* need to be done after alloc mem, since it's self adjusting to amount
2664 * of memory available for RSS queues
2666 rc = bnx2x_alloc_fp_mem(bp);
2668 BNX2X_ERR("Unable to allocate memory for fps\n");
2669 LOAD_ERROR_EXIT(bp, load_error0);
2672 /* Allocated memory for FW statistics */
2673 rc = bnx2x_alloc_fw_stats_mem(bp);
2674 if (rc)
2675 LOAD_ERROR_EXIT(bp, load_error0);
2677 /* request pf to initialize status blocks */
2678 if (IS_VF(bp)) {
2679 rc = bnx2x_vfpf_init(bp);
2680 if (rc)
2681 LOAD_ERROR_EXIT(bp, load_error0);
2682 }
2684 /* As long as bnx2x_alloc_mem() may possibly update
2685 * bp->num_queues, bnx2x_set_real_num_queues() should always
2686 * come after it. At this stage cnic queues are not counted.
2688 rc = bnx2x_set_real_num_queues(bp, 0);
2690 BNX2X_ERR("Unable to set real_num_queues\n");
2691 LOAD_ERROR_EXIT(bp, load_error0);
2694 /* configure multi cos mappings in kernel.
2695 * this configuration may be overridden by a multi class queue
2696 * discipline or by a dcbx negotiation result.
2697 */
2698 bnx2x_setup_tc(bp->dev, bp->max_cos);
2700 /* Add all NAPI objects */
2701 bnx2x_add_all_napi(bp);
2702 DP(NETIF_MSG_IFUP, "napi added\n");
2703 bnx2x_napi_enable(bp);
2706 /* set pf load just before approaching the MCP */
2707 bnx2x_set_pf_load(bp);
2709 /* if mcp exists send load request and analyze response */
2710 if (!BP_NOMCP(bp)) {
2711 /* attempt to load pf */
2712 rc = bnx2x_nic_load_request(bp, &load_code);
2713 if (rc)
2714 LOAD_ERROR_EXIT(bp, load_error1);
2716 /* what did mcp say? */
2717 rc = bnx2x_compare_fw_ver(bp, load_code, true);
2718 if (rc) {
2719 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2720 LOAD_ERROR_EXIT(bp, load_error2);
2721 }
2722 } else {
2723 load_code = bnx2x_nic_load_no_mcp(bp, port);
2724 }
2726 /* mark pmf if applicable */
2727 bnx2x_nic_load_pmf(bp, load_code);
2729 /* Init Function state controlling object */
2730 bnx2x__init_func_obj(bp);
2733 rc = bnx2x_init_hw(bp, load_code);
2735 BNX2X_ERR("HW init failed, aborting\n");
2736 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2737 LOAD_ERROR_EXIT(bp, load_error2);
2741 bnx2x_pre_irq_nic_init(bp);
2743 /* Connect to IRQs */
2744 rc = bnx2x_setup_irqs(bp);
2746 BNX2X_ERR("setup irqs failed\n");
2748 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2749 LOAD_ERROR_EXIT(bp, load_error2);
2752 /* Init per-function objects */
2754 /* Setup NIC internals and enable interrupts */
2755 bnx2x_post_irq_nic_init(bp, load_code);
2757 bnx2x_init_bp_objs(bp);
2758 bnx2x_iov_nic_init(bp);
2760 /* Set AFEX default VLAN tag to an invalid value */
2761 bp->afex_def_vlan_tag = -1;
2762 bnx2x_nic_load_afex_dcc(bp, load_code);
2763 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2764 rc = bnx2x_func_start(bp);
2766 BNX2X_ERR("Function start failed!\n");
2767 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2769 LOAD_ERROR_EXIT(bp, load_error3);
2772 /* Send LOAD_DONE command to MCP */
2773 if (!BP_NOMCP(bp)) {
2774 load_code = bnx2x_fw_command(bp,
2775 DRV_MSG_CODE_LOAD_DONE, 0);
2777 BNX2X_ERR("MCP response failure, aborting\n");
2779 LOAD_ERROR_EXIT(bp, load_error3);
2783 /* initialize FW coalescing state machines in RAM */
2784 bnx2x_update_coalesce(bp);
2787 /* setup the leading queue */
2788 rc = bnx2x_setup_leading(bp);
2790 BNX2X_ERR("Setup leading failed!\n");
2791 LOAD_ERROR_EXIT(bp, load_error3);
2794 /* set up the rest of the queues */
2795 for_each_nondefault_eth_queue(bp, i) {
2796 if (IS_PF(bp))
2797 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2798 else /* VF */
2799 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2800 if (rc) {
2801 BNX2X_ERR("Queue %d setup failed\n", i);
2802 LOAD_ERROR_EXIT(bp, load_error3);
2803 }
2804 }
2806 /* setup rss */
2807 rc = bnx2x_init_rss(bp);
2808 if (rc) {
2809 BNX2X_ERR("PF RSS init failed\n");
2810 LOAD_ERROR_EXIT(bp, load_error3);
2811 }
2813 /* Now when Clients are configured we are ready to work */
2814 bp->state = BNX2X_STATE_OPEN;
2816 /* Configure a ucast MAC */
2817 if (IS_PF(bp))
2818 rc = bnx2x_set_eth_mac(bp, true);
2819 else /* vf */
2820 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2821 true);
2822 if (rc) {
2823 BNX2X_ERR("Setting Ethernet MAC failed\n");
2824 LOAD_ERROR_EXIT(bp, load_error3);
2825 }
2827 if (IS_PF(bp) && bp->pending_max) {
2828 bnx2x_update_max_mf_config(bp, bp->pending_max);
2829 bp->pending_max = 0;
2830 }
2832 bp->force_link_down = false;
2833 if (IS_PF(bp)) {
2834 rc = bnx2x_initial_phy_init(bp, load_mode);
2835 if (rc)
2836 LOAD_ERROR_EXIT(bp, load_error3);
2837 }
2838 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2840 /* Start fast path */
2842 /* Re-configure vlan filters */
2843 rc = bnx2x_vlan_reconfigure_vid(bp);
2844 if (rc)
2845 LOAD_ERROR_EXIT(bp, load_error3);
2847 /* Initialize Rx filter. */
2848 bnx2x_set_rx_mode_inner(bp);
2850 if (bp->flags & PTP_SUPPORTED) {
2851 bnx2x_register_phc(bp);
2853 bnx2x_configure_ptp_filters(bp);
2854 }
2856 switch (load_mode) {
2857 case LOAD_NORMAL:
2858 /* Tx queue should be only re-enabled */
2859 netif_tx_wake_all_queues(bp->dev);
2860 break;
2862 case LOAD_OPEN:
2863 netif_tx_start_all_queues(bp->dev);
2864 smp_mb__after_atomic();
2865 break;
2867 case LOAD_DIAG:
2868 case LOAD_LOOPBACK_EXT:
2869 bp->state = BNX2X_STATE_DIAG;
2870 break;
2872 default:
2873 break;
2874 }
2876 if (bp->port.pmf)
2877 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2878 else
2879 bnx2x__link_status_update(bp);
2881 /* start the timer */
2882 mod_timer(&bp->timer, jiffies + bp->current_interval);
2884 if (CNIC_ENABLED(bp))
2885 bnx2x_load_cnic(bp);
2887 if (IS_PF(bp))
2888 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2890 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2891 /* mark driver is loaded in shmem2 */
2892 u32 val;
2893 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2894 val &= ~DRV_FLAGS_MTU_MASK;
2895 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2896 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2897 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2898 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2899 }
2901 /* Wait for all pending SP commands to complete */
2902 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2903 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2904 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2905 return -EBUSY;
2906 }
2908 /* Update driver data for On-Chip MFW dump. */
2909 if (IS_PF(bp))
2910 bnx2x_update_mfw_dump(bp);
2912 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2913 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2914 bnx2x_dcbx_init(bp, false);
2916 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2917 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2919 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2923 #ifndef BNX2X_STOP_ON_ERROR
2924 load_error3:
2925 if (IS_PF(bp)) {
2926 bnx2x_int_disable_sync(bp, 1);
2928 /* Clean queueable objects */
2929 bnx2x_squeeze_objects(bp);
2930 }
2932 /* Free SKBs, SGEs, TPA pool and driver internals */
2933 bnx2x_free_skbs(bp);
2934 for_each_rx_queue(bp, i)
2935 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2937 /* Release IRQs */
2938 bnx2x_free_irq(bp);
2939 load_error2:
2940 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2941 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2942 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2943 }
2945 bp->port.pmf = 0;
2946 load_error1:
2947 bnx2x_napi_disable(bp);
2948 bnx2x_del_all_napi(bp);
2950 /* clear pf_load status, as it was already set */
2951 if (IS_PF(bp))
2952 bnx2x_clear_pf_load(bp);
2953 load_error0:
2954 bnx2x_free_fw_stats_mem(bp);
2955 bnx2x_free_fp_mem(bp);
2956 bnx2x_free_mem(bp);
2958 return rc;
2959 #endif /* ! BNX2X_STOP_ON_ERROR */
2962 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2963 {
2964 u8 rc = 0, cos, i;
2966 /* Wait until tx fastpath tasks complete */
2967 for_each_tx_queue(bp, i) {
2968 struct bnx2x_fastpath *fp = &bp->fp[i];
2970 for_each_cos_in_tx_queue(fp, cos)
2971 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2972 if (rc)
2973 return rc;
2974 }
2976 return 0;
2977 }
2978 /* must be called with rtnl_lock */
2979 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2980 {
2981 int i;
2982 bool global = false;
2984 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2986 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2987 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2989 /* mark driver is unloaded in shmem2 */
2990 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2991 u32 val;
2992 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2993 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2994 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2995 }
2997 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2998 (bp->state == BNX2X_STATE_CLOSED ||
2999 bp->state == BNX2X_STATE_ERROR)) {
3000 /* We can get here if the driver has been unloaded
3001 * during parity error recovery and is either waiting for a
3002 * leader to complete or for other functions to unload and
3003 * then ifdown has been issued. In this case we want to
3004 * unload and let other functions to complete a recovery
3005 * process instead.
3006 */
3007 bp->recovery_state = BNX2X_RECOVERY_DONE;
3008 bp->is_leader = 0;
3009 bnx2x_release_leader_lock(bp);
3010 smp_mb();
3012 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3013 BNX2X_ERR("Can't unload in closed or error state\n");
3014 return -EINVAL;
3015 }
3017 /* Nothing to do during unload if previous bnx2x_nic_load()
3018 * has not completed successfully - all resources are released.
3019 *
3020 * we can get here only after unsuccessful ndo_* callback, during which
3021 * dev->IFF_UP flag is still on.
3022 */
3023 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3024 return 0;
3026 /* It's important to set the bp->state to the value different from
3027 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3028 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3029 */
3030 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3031 smp_mb();
3033 /* indicate to VFs that the PF is going down */
3034 bnx2x_iov_channel_down(bp);
3036 if (CNIC_LOADED(bp))
3037 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3040 bnx2x_tx_disable(bp);
3041 netdev_reset_tc(bp->dev);
3043 bp->rx_mode = BNX2X_RX_MODE_NONE;
3045 del_timer_sync(&bp->timer);
3047 if (IS_PF(bp) && !BP_NOMCP(bp)) {
3048 /* Set ALWAYS_ALIVE bit in shmem */
3049 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3050 bnx2x_drv_pulse(bp);
3051 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3052 bnx2x_save_statistics(bp);
3053 }
3055 /* wait till consumers catch up with producers in all queues.
3056 * If we're recovering, FW can't write to host so no reason
3057 * to wait for the queues to complete all Tx.
3058 */
3059 if (unload_mode != UNLOAD_RECOVERY)
3060 bnx2x_drain_tx_queues(bp);
3062 /* if VF indicate to PF this function is going down (PF will delete sp
3063 * elements and clear initializations
3064 */
3065 if (IS_VF(bp)) {
3066 bnx2x_clear_vlan_info(bp);
3067 bnx2x_vfpf_close_vf(bp);
3068 } else if (unload_mode != UNLOAD_RECOVERY) {
3069 /* if this is a normal/close unload need to clean up chip*/
3070 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3071 } else {
3072 /* Send the UNLOAD_REQUEST to the MCP */
3073 bnx2x_send_unload_req(bp, unload_mode);
3075 /* Prevent transactions to host from the functions on the
3076 * engine that doesn't reset global blocks in case of global
3077 * attention once global blocks are reset and gates are opened
3078 * (the engine which leader will perform the recovery
3079 * wasn't assigned to)
3080 */
3081 if (!CHIP_IS_E1x(bp))
3082 bnx2x_pf_disable(bp);
3084 /* Disable HW interrupts, NAPI */
3085 bnx2x_netif_stop(bp, 1);
3086 /* Delete all NAPI objects */
3087 bnx2x_del_all_napi(bp);
3088 if (CNIC_LOADED(bp))
3089 bnx2x_del_all_napi_cnic(bp);
3090 /* Release IRQs */
3091 bnx2x_free_irq(bp);
3093 /* Report UNLOAD_DONE to MCP */
3094 bnx2x_send_unload_done(bp, false);
3095 }
3097 /*
3098 * At this stage no more interrupts will arrive so we may safely clean
3099 * the queueable objects here in case they failed to get cleaned so far.
3100 */
3101 if (IS_PF(bp))
3102 bnx2x_squeeze_objects(bp);
3104 /* There should be no more pending SP commands at this stage */
3109 /* clear pending work in rtnl task */
3110 bp->sp_rtnl_state = 0;
3113 /* Free SKBs, SGEs, TPA pool and driver internals */
3114 bnx2x_free_skbs(bp);
3115 if (CNIC_LOADED(bp))
3116 bnx2x_free_skbs_cnic(bp);
3117 for_each_rx_queue(bp, i)
3118 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3120 bnx2x_free_fp_mem(bp);
3121 if (CNIC_LOADED(bp))
3122 bnx2x_free_fp_mem_cnic(bp);
3124 if (IS_PF(bp)) {
3125 if (CNIC_LOADED(bp))
3126 bnx2x_free_mem_cnic(bp);
3127 }
3128 bnx2x_free_mem(bp);
3130 bp->state = BNX2X_STATE_CLOSED;
3131 bp->cnic_loaded = false;
3133 /* Clear driver version indication in shmem */
3134 if (IS_PF(bp) && !BP_NOMCP(bp))
3135 bnx2x_update_mng_version(bp);
3137 /* Check if there are pending parity attentions. If there are - set
3138 * RECOVERY_IN_PROGRESS.
3140 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3141 bnx2x_set_reset_in_progress(bp);
3143 /* Set RESET_IS_GLOBAL if needed */
3144 if (global)
3145 bnx2x_set_reset_global(bp);
3146 }
3148 /* The last driver must disable a "close the gate" if there is no
3149 * parity attention or "process kill" pending.
3152 !bnx2x_clear_pf_load(bp) &&
3153 bnx2x_reset_is_done(bp, BP_PATH(bp)))
3154 bnx2x_disable_close_the_gate(bp);
3156 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3161 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3162 {
3163 u16 pmcsr;
3165 /* If there is no power capability, silently succeed */
3166 if (!bp->pdev->pm_cap) {
3167 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3171 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3173 switch (state) {
3174 case PCI_D0:
3175 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3176 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3177 PCI_PM_CTRL_PME_STATUS));
3179 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3180 /* delay required during transition out of D3hot */
3181 msleep(20);
3182 break;
3184 case PCI_D3hot:
3185 /* If there are other clients above don't
3186 shut down the power */
3187 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3188 return 0;
3189 /* Don't shut down the power for emulation and FPGA */
3190 if (CHIP_REV_IS_SLOW(bp))
3191 return 0;
3193 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3194 pmcsr |= 3; /* D3hot */
3196 if (bp->wol)
3197 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3199 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3200 pmcsr);
3202 /* No more memory access after this point until
3203 * device is brought back to D0.
3204 */
3205 break;
3207 default:
3208 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3209 return -EINVAL;
3210 }
3212 return 0;
3213 }
3214 /*
3215 * net_device service functions
3216 */
3217 static int bnx2x_poll(struct napi_struct *napi, int budget)
3218 {
3219 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3220 napi);
3221 struct bnx2x *bp = fp->bp;
3222 int rx_work_done;
3223 u8 cos;
3225 #ifdef BNX2X_STOP_ON_ERROR
3226 if (unlikely(bp->panic)) {
3227 napi_complete(napi);
3228 return 0;
3229 }
3230 #endif
3231 for_each_cos_in_tx_queue(fp, cos)
3232 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3233 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3235 rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
3237 if (rx_work_done < budget) {
3238 /* No need to update SB for FCoE L2 ring as long as
3239 * it's connected to the default SB and the SB
3240 * has been updated when NAPI was scheduled.
3241 */
3242 if (IS_FCOE_FP(fp)) {
3243 napi_complete_done(napi, rx_work_done);
3244 } else {
3245 bnx2x_update_fpsb_idx(fp);
3246 /* bnx2x_has_rx_work() reads the status block,
3247 * thus we need to ensure that status block indices
3248 * have been actually read (bnx2x_update_fpsb_idx)
3249 * prior to this check (bnx2x_has_rx_work) so that
3250 * we won't write the "newer" value of the status block
3251 * to IGU (if there was a DMA right after
3252 * bnx2x_has_rx_work and if there is no rmb, the memory
3253 * reading (bnx2x_update_fpsb_idx) may be postponed
3254 * to right before bnx2x_ack_sb). In this case there
3255 * will never be another interrupt until there is
3256 * another update of the status block, while there
3257 * is still unhandled work.
3258 */
3259 rmb();
3261 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3262 if (napi_complete_done(napi, rx_work_done)) {
3263 /* Re-enable interrupts */
3264 DP(NETIF_MSG_RX_STATUS,
3265 "Update index to %d\n", fp->fp_hc_idx);
3266 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3267 le16_to_cpu(fp->fp_hc_idx),
3268 IGU_INT_ENABLE, 1);
3269 }
3270 } else {
3271 rx_work_done = budget;
3272 }
3273 }
3274 }
3276 return rx_work_done;
3277 }
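/* NAPI contract reminder: returning less than the budget after
 * napi_complete_done() stops polling and re-arms the status-block interrupt
 * via bnx2x_ack_sb(); returning the full budget keeps the softirq polling
 * without touching the IGU.
 */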
3279 /* we split the first BD into headers and data BDs
3280 * to ease the pain of our fellow microcode engineers
3281 * we use one mapping for both BDs
3282 */
3283 static u16 bnx2x_tx_split(struct bnx2x *bp,
3284 struct bnx2x_fp_txdata *txdata,
3285 struct sw_tx_bd *tx_buf,
3286 struct eth_tx_start_bd **tx_bd, u16 hlen,
3287 u16 bd_prod)
3288 {
3289 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3290 struct eth_tx_bd *d_tx_bd;
3291 dma_addr_t mapping;
3292 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3294 /* first fix first BD */
3295 h_tx_bd->nbytes = cpu_to_le16(hlen);
3297 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3298 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3300 /* now get a new data BD
3301 * (after the pbd) and fill it */
3302 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3303 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3305 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3306 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3308 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3309 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3310 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3312 /* this marks the BD as one that has no individual mapping */
3313 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3315 DP(NETIF_MSG_TX_QUEUED,
3316 "TSO split data size is %d (%x:%x)\n",
3317 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3320 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3322 return bd_prod;
3323 }
3325 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3326 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
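/* bnx2x_csum_fix() below re-bases a partial checksum when the HW starts
 * checksumming at a different offset than the stack did: a positive fix
 * subtracts the extra leading bytes' partial sum, a negative fix adds the
 * missing bytes back, and the folded result is byte-swapped for the BD.
 */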
3327 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3328 {
3329 __sum16 tsum = (__force __sum16) csum;
3331 if (fix > 0)
3332 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3333 csum_partial(t_header - fix, fix, 0)));
3335 else if (fix < 0)
3336 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3337 csum_partial(t_header, -fix, 0)));
3339 return bswab16(tsum);
3340 }
3342 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3343 {
3344 u32 rc;
3345 __be16 protocol;
3346 u8 prot = 0;
3348 if (skb->ip_summed != CHECKSUM_PARTIAL)
3349 return XMIT_PLAIN;
3351 protocol = vlan_get_protocol(skb);
3352 if (protocol == htons(ETH_P_IPV6)) {
3353 rc = XMIT_CSUM_V6;
3354 prot = ipv6_hdr(skb)->nexthdr;
3355 } else {
3356 rc = XMIT_CSUM_V4;
3357 prot = ip_hdr(skb)->protocol;
3358 }
3360 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3361 if (inner_ip_hdr(skb)->version == 6) {
3362 rc |= XMIT_CSUM_ENC_V6;
3363 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3364 rc |= XMIT_CSUM_TCP;
3365 } else {
3366 rc |= XMIT_CSUM_ENC_V4;
3367 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3368 rc |= XMIT_CSUM_TCP;
3369 }
3370 }
3371 if (prot == IPPROTO_TCP)
3372 rc |= XMIT_CSUM_TCP;
3374 if (skb_is_gso(skb)) {
3375 if (skb_is_gso_v6(skb)) {
3376 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3377 if (rc & XMIT_CSUM_ENC)
3378 rc |= XMIT_GSO_ENC_V6;
3380 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3381 if (rc & XMIT_CSUM_ENC)
3382 rc |= XMIT_GSO_ENC_V4;
3383 }
3384 }
3386 return rc;
3387 }
3389 /* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
3390 #define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4
3392 /* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3393 #define BNX2X_NUM_TSO_WIN_SUB_BDS 3
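/* Rationale for the window check in bnx2x_pkt_req_lin() below: the FW
 * fetches at most MAX_FETCH_BD BDs per LSO transmission window, so every
 * run of (MAX_FETCH_BD - num_tso_win_sub) consecutive frags must carry at
 * least one MSS worth of payload; otherwise the skb must be linearized.
 */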
3395 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3396 /* check if packet requires linearization (packet is too fragmented)
3397 no need to check fragmentation if page size > 8K (there will be no
3398 violation to FW restrictions) */
3399 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3400 u32 xmit_type)
3401 {
3402 int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3403 int to_copy = 0, hlen = 0;
3405 if (xmit_type & XMIT_GSO_ENC)
3406 num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3408 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3409 if (xmit_type & XMIT_GSO) {
3410 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3411 int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3412 /* Number of windows to check */
3413 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3414 int wnd_idx = 0;
3415 int frag_idx = 0;
3416 u32 wnd_sum = 0;
3418 /* Headers length */
3419 if (xmit_type & XMIT_GSO_ENC)
3420 hlen = (int)(skb_inner_transport_header(skb) -
3421 skb->data) +
3422 inner_tcp_hdrlen(skb);
3423 else
3424 hlen = (int)(skb_transport_header(skb) -
3425 skb->data) + tcp_hdrlen(skb);
3427 /* Amount of data (w/o headers) on linear part of SKB*/
3428 first_bd_sz = skb_headlen(skb) - hlen;
3430 wnd_sum = first_bd_sz;
3432 /* Calculate the first sum - it's special */
3433 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3434 wnd_sum +=
3435 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3437 /* If there was data on linear skb data - check it */
3438 if (first_bd_sz > 0) {
3439 if (unlikely(wnd_sum < lso_mss)) {
3440 to_copy = 1;
3441 goto exit_lbl;
3442 }
3444 wnd_sum -= first_bd_sz;
3445 }
3447 /* Others are easier: run through the frag list and
3448 check all windows */
3449 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3450 wnd_sum +=
3451 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3453 if (unlikely(wnd_sum < lso_mss)) {
3454 to_copy = 1;
3455 break;
3456 }
3457 wnd_sum -=
3458 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3459 }
3460 } else {
3461 /* in non-LSO too fragmented packet should always
3462 * be linearized
3463 */
3464 to_copy = 1;
3465 }
3466 }
3467 exit_lbl:
3468 if (unlikely(to_copy))
3469 DP(NETIF_MSG_TX_QUEUED,
3470 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3471 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3472 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3474 return to_copy;
3475 }
3476 #endif
3478 /**
3479 * bnx2x_set_pbd_gso - update PBD in GSO case.
3480 * @skb: packet skb
3481 * @pbd: parse BD
3483 * @xmit_type: xmit flags
3484 */
3485 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3486 struct eth_tx_parse_bd_e1x *pbd,
3489 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3490 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3491 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3493 if (xmit_type & XMIT_GSO_V4) {
3494 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3495 pbd->tcp_pseudo_csum =
3496 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3498 0, IPPROTO_TCP, 0));
3499 } else {
3500 pbd->tcp_pseudo_csum =
3501 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3502 &ipv6_hdr(skb)->daddr,
3503 0, IPPROTO_TCP, 0));
3504 }
3506 pbd->global_data |=
3507 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3508 }
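/* Note: both branches above seed tcp_pseudo_csum with a pseudo-header
 * checksum computed over a zero length (csum_tcpudp_magic/csum_ipv6_magic
 * with len = 0), which is what the PSEUDO_CS_WITHOUT_LEN flag advertises:
 * the FW adds the real TCP length for each generated segment.
 */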
3510 /**
3511 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3512 *
3513 * @bp: driver handle
3514 * @skb: packet skb
3515 * @parsing_data: data to be updated
3516 * @xmit_type: xmit flags
3517 *
3518 * 57712/578xx related, when skb has encapsulation
3519 */
3520 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3521 u32 *parsing_data, u32 xmit_type)
3522 {
3523 *parsing_data |=
3524 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3525 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3526 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3528 if (xmit_type & XMIT_CSUM_TCP) {
3529 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3530 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3531 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3533 return skb_inner_transport_header(skb) +
3534 inner_tcp_hdrlen(skb) - skb->data;
3535 }
3537 /* We support checksum offload for TCP and UDP only.
3538 * No need to pass the UDP header length - it's a constant.
3539 */
3540 return skb_inner_transport_header(skb) +
3541 sizeof(struct udphdr) - skb->data;
3542 }
3544 /**
3545 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3546 *
3547 * @bp: driver handle
3548 * @skb: packet skb
3549 * @parsing_data: data to be updated
3550 * @xmit_type: xmit flags
3551 *
3552 * 57712/578xx related
3553 */
3554 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3555 u32 *parsing_data, u32 xmit_type)
3556 {
3557 *parsing_data |=
3558 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3559 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3560 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3562 if (xmit_type & XMIT_CSUM_TCP) {
3563 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3564 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3565 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3567 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3568 }
3569 /* We support checksum offload for TCP and UDP only.
3570 * No need to pass the UDP header length - it's a constant.
3571 */
3572 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3573 }
3575 /* set FW indication according to inner or outer protocols if tunneled */
3576 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3577 struct eth_tx_start_bd *tx_start_bd,
3578 u32 xmit_type)
3579 {
3580 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3582 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3583 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3585 if (!(xmit_type & XMIT_CSUM_TCP))
3586 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3587 }
3589 /**
3590 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3591 *
3592 * @bp: driver handle
3593 * @skb: packet skb
3594 * @pbd: parse BD to be updated
3595 * @xmit_type: xmit flags
3596 */
3597 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3598 struct eth_tx_parse_bd_e1x *pbd,
3599 u32 xmit_type)
3600 {
3601 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3603 /* for now NS flag is not used in Linux */
3604 pbd->global_data =
3605 cpu_to_le16(hlen |
3606 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3607 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3609 pbd->ip_hlen_w = (skb_transport_header(skb) -
3610 skb_network_header(skb)) >> 1;
3612 hlen += pbd->ip_hlen_w;
3614 /* We support checksum offload for TCP and UDP only */
3615 if (xmit_type & XMIT_CSUM_TCP)
3616 hlen += tcp_hdrlen(skb) / 2;
3618 hlen += sizeof(struct udphdr) / 2;
3620 pbd->total_hlen_w = cpu_to_le16(hlen);
3621 hlen = hlen*2;
3623 if (xmit_type & XMIT_CSUM_TCP) {
3624 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3626 } else {
3627 s8 fix = SKB_CS_OFF(skb); /* signed! */
3629 DP(NETIF_MSG_TX_QUEUED,
3630 "hlen %d fix %d csum before fix %x\n",
3631 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3633 /* HW bug: fixup the CSUM */
3634 pbd->tcp_pseudo_csum =
3635 bnx2x_csum_fix(skb_transport_header(skb),
3636 SKB_CS(skb), fix);
3638 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3639 pbd->tcp_pseudo_csum);
3640 }
3642 return hlen;
3643 }
3645 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3646 struct eth_tx_parse_bd_e2 *pbd_e2,
3647 struct eth_tx_parse_2nd_bd *pbd2,
3648 u16 *global_data,
3649 u32 xmit_type)
3650 {
3651 u16 hlen_w = 0;
3652 u8 outerip_off, outerip_len = 0;
3654 /* from outer IP to transport */
3655 hlen_w = (skb_inner_transport_header(skb) -
3656 skb_network_header(skb)) >> 1;
3658 /* transport len */
3659 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3661 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3663 /* outer IP header info */
3664 if (xmit_type & XMIT_CSUM_V4) {
3665 struct iphdr *iph = ip_hdr(skb);
3666 u32 csum = (__force u32)(~iph->check) -
3667 (__force u32)iph->tot_len -
3668 (__force u32)iph->frag_off;
3670 outerip_len = iph->ihl << 1;
3672 pbd2->fw_ip_csum_wo_len_flags_frag =
3673 bswab16(csum_fold((__force __wsum)csum));
3674 } else {
3675 pbd2->fw_ip_hdr_to_payload_w =
3676 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3677 pbd_e2->data.tunnel_data.flags |=
3678 ETH_TUNNEL_DATA_IPV6_OUTER;
3679 }
3681 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3683 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3685 /* inner IP header info */
3686 if (xmit_type & XMIT_CSUM_ENC_V4) {
3687 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3689 pbd_e2->data.tunnel_data.pseudo_csum =
3690 bswab16(~csum_tcpudp_magic(
3691 inner_ip_hdr(skb)->saddr,
3692 inner_ip_hdr(skb)->daddr,
3693 0, IPPROTO_TCP, 0));
3694 } else {
3695 pbd_e2->data.tunnel_data.pseudo_csum =
3696 bswab16(~csum_ipv6_magic(
3697 &inner_ipv6_hdr(skb)->saddr,
3698 &inner_ipv6_hdr(skb)->daddr,
3699 0, IPPROTO_TCP, 0));
3700 }
3702 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3704 *global_data |= outerip_off |
3705 (outerip_len <<
3707 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3708 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3709 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3711 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3712 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3713 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3714 }
3715 }
3717 static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3718 u32 xmit_type)
3719 {
3720 struct ipv6hdr *ipv6;
3722 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3723 return;
3725 if (xmit_type & XMIT_GSO_ENC_V6)
3726 ipv6 = inner_ipv6_hdr(skb);
3727 else /* XMIT_GSO_V6 */
3728 ipv6 = ipv6_hdr(skb);
3730 if (ipv6->nexthdr == NEXTHDR_IPV6)
3731 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3732 }
3734 /* called with netif_tx_lock
3735 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3736 * netif_wake_queue()
3737 */
3738 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3739 {
3740 struct bnx2x *bp = netdev_priv(dev);
3742 struct netdev_queue *txq;
3743 struct bnx2x_fp_txdata *txdata;
3744 struct sw_tx_bd *tx_buf;
3745 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3746 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3747 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3748 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3749 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3750 u32 pbd_e2_parsing_data = 0;
3751 u16 pkt_prod, bd_prod;
3752 int nbd, txq_index;
3753 dma_addr_t mapping;
3754 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3755 int i;
3756 u8 hlen = 0;
3757 __le16 pkt_size = 0;
3758 struct ethhdr *eth;
3759 u8 mac_type = UNICAST_ADDRESS;
3761 #ifdef BNX2X_STOP_ON_ERROR
3762 if (unlikely(bp->panic))
3763 return NETDEV_TX_BUSY;
3764 #endif
3766 txq_index = skb_get_queue_mapping(skb);
3767 txq = netdev_get_tx_queue(dev, txq_index);
3769 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3771 txdata = &bp->bnx2x_txq[txq_index];
3773 /* enable this debug print to view the transmission queue being used
3774 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3775 txq_index, fp_index, txdata_index); */
3777 /* enable this debug print to view the transmission details
3778 DP(NETIF_MSG_TX_QUEUED,
3779 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3780 txdata->cid, fp_index, txdata_index, txdata, fp); */
3782 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3783 skb_shinfo(skb)->nr_frags +
3784 BDS_PER_TX_PKT +
3785 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3786 /* Handle special storage cases separately */
3787 if (txdata->tx_ring_size == 0) {
3788 struct bnx2x_eth_q_stats *q_stats =
3789 bnx2x_fp_qstats(bp, txdata->parent_fp);
3790 q_stats->driver_filtered_tx_pkt++;
3791 dev_kfree_skb(skb);
3792 return NETDEV_TX_OK;
3793 }
3794 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3795 netif_tx_stop_queue(txq);
3796 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3798 return NETDEV_TX_BUSY;
3801 DP(NETIF_MSG_TX_QUEUED,
3802 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3803 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3804 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3805 skb->len);
3807 eth = (struct ethhdr *)skb->data;
3809 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3810 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3811 if (is_broadcast_ether_addr(eth->h_dest))
3812 mac_type = BROADCAST_ADDRESS;
3814 mac_type = MULTICAST_ADDRESS;
3815 }
3817 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3818 /* First, check if we need to linearize the skb (due to FW
3819 restrictions). No need to check fragmentation if page size > 8K
3820 (there will be no violation to FW restrictions) */
3821 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3822 /* Statistics of linearization */
3823 bp->lin_cnt++;
3824 if (skb_linearize(skb) != 0) {
3825 DP(NETIF_MSG_TX_QUEUED,
3826 "SKB linearization failed - silently dropping this SKB\n");
3827 dev_kfree_skb_any(skb);
3828 return NETDEV_TX_OK;
3829 }
3830 }
3831 #endif
3832 /* Map skb linear data for DMA */
3833 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3834 skb_headlen(skb), DMA_TO_DEVICE);
3835 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3836 DP(NETIF_MSG_TX_QUEUED,
3837 "SKB mapping failed - silently dropping this SKB\n");
3838 dev_kfree_skb_any(skb);
3839 return NETDEV_TX_OK;
3840 }
3841 /*
3842 Please read carefully. First we use one BD which we mark as start,
3843 then we have a parsing info BD (used for TSO or xsum),
3844 and only then we have the rest of the TSO BDs.
3845 (don't forget to mark the last one as last,
3846 and to unmap only AFTER you write to the BD ...)
3847 And above all, all PBD sizes are in words - NOT DWORDS!
3848 */
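/* Resulting BD chain for one packet, as built below:
 *   start BD -> parse BD (E1x or E2) [-> 2nd parse BD if tunneled]
 *            -> split/data BDs for the linear part -> one BD per frag,
 * with first_bd->nbd patched to the final count before the doorbell rings.
 */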
3850 /* get current pkt produced now - advance it just before sending packet
3851 * since mapping of pages may fail and cause packet to be dropped
3852 */
3853 pkt_prod = txdata->tx_pkt_prod;
3854 bd_prod = TX_BD(txdata->tx_bd_prod);
3856 /* get a tx_buf and first BD
3857 * tx_start_bd may be changed during SPLIT,
3858 * but first_bd will always stay first
3859 */
3860 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3861 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3862 first_bd = tx_start_bd;
3864 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3866 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3867 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3868 bp->eth_stats.ptp_skip_tx_ts++;
3869 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3870 } else if (bp->ptp_tx_skb) {
3871 bp->eth_stats.ptp_skip_tx_ts++;
3872 netdev_err_once(bp->dev,
3873 "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
3874 } else {
3875 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3876 /* schedule check for Tx timestamp */
3877 bp->ptp_tx_skb = skb_get(skb);
3878 bp->ptp_tx_start = jiffies;
3879 schedule_work(&bp->ptp_task);
3880 }
3881 }
3883 /* header nbd: indirectly zero other flags! */
3884 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3886 /* remember the first BD of the packet */
3887 tx_buf->first_bd = txdata->tx_bd_prod;
3888 tx_buf->skb = skb;
3889 tx_buf->flags = 0;
3891 DP(NETIF_MSG_TX_QUEUED,
3892 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3893 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3895 if (skb_vlan_tag_present(skb)) {
3896 tx_start_bd->vlan_or_ethertype =
3897 cpu_to_le16(skb_vlan_tag_get(skb));
3898 tx_start_bd->bd_flags.as_bitfield |=
3899 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3900 } else {
3901 /* when transmitting in a vf, start bd must hold the ethertype
3902 * for fw to enforce it
3903 */
3904 u16 vlan_tci = 0;
3905 #ifndef BNX2X_STOP_ON_ERROR
3906 if (IS_VF(bp)) {
3907 #endif
3908 /* Still need to consider inband vlan for enforced */
3909 if (__vlan_get_tag(skb, &vlan_tci)) {
3910 tx_start_bd->vlan_or_ethertype =
3911 cpu_to_le16(ntohs(eth->h_proto));
3912 } else {
3913 tx_start_bd->bd_flags.as_bitfield |=
3914 (X_ETH_INBAND_VLAN <<
3915 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3916 tx_start_bd->vlan_or_ethertype =
3917 cpu_to_le16(vlan_tci);
3918 }
3919 #ifndef BNX2X_STOP_ON_ERROR
3920 } else {
3921 /* used by FW for packet accounting */
3922 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3923 }
3924 #endif
3925 }
3927 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3929 /* turn on parsing and get a BD */
3930 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3932 if (xmit_type & XMIT_CSUM)
3933 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3935 if (!CHIP_IS_E1x(bp)) {
3936 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3937 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3939 if (xmit_type & XMIT_CSUM_ENC) {
3940 u16 global_data = 0;
3942 /* Set PBD in enc checksum offload case */
3943 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3944 &pbd_e2_parsing_data,
3945 xmit_type);
3947 /* turn on 2nd parsing and get a BD */
3948 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3950 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3952 memset(pbd2, 0, sizeof(*pbd2));
3954 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3955 (skb_inner_network_header(skb) -
3956 skb->data) >> 1;
3958 if (xmit_type & XMIT_GSO_ENC)
3959 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3960 &global_data,
3961 xmit_type);
3963 pbd2->global_data = cpu_to_le16(global_data);
3965 /* add addition parse BD indication to start BD */
3966 SET_FLAG(tx_start_bd->general_data,
3967 ETH_TX_START_BD_PARSE_NBDS, 1);
3968 /* set encapsulation flag in start BD */
3969 SET_FLAG(tx_start_bd->general_data,
3970 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3972 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3975 } else if (xmit_type & XMIT_CSUM) {
3976 /* Set PBD in checksum offload case w/o encapsulation */
3977 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3978 &pbd_e2_parsing_data,
3979 xmit_type);
3980 }
3982 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3983 /* Add the macs to the parsing BD if this is a vf or if
3984 * Tx Switching is enabled.
3985 */
3986 if (IS_VF(bp)) {
3987 /* override GRE parameters in BD */
3988 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3989 &pbd_e2->data.mac_addr.src_mid,
3990 &pbd_e2->data.mac_addr.src_lo,
3991 eth->h_source);
3993 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3994 &pbd_e2->data.mac_addr.dst_mid,
3995 &pbd_e2->data.mac_addr.dst_lo,
3996 eth->h_dest);
3997 } else {
3998 if (bp->flags & TX_SWITCHING)
3999 bnx2x_set_fw_mac_addr(
4000 &pbd_e2->data.mac_addr.dst_hi,
4001 &pbd_e2->data.mac_addr.dst_mid,
4002 &pbd_e2->data.mac_addr.dst_lo,
4003 eth->h_dest);
4004 #ifdef BNX2X_STOP_ON_ERROR
4005 /* Enforce security is always set in Stop on Error -
4006 * source mac should be present in the parsing BD
4007 */
4008 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4009 &pbd_e2->data.mac_addr.src_mid,
4010 &pbd_e2->data.mac_addr.src_lo,
4011 eth->h_source);
4012 #endif
4013 }
4015 SET_FLAG(pbd_e2_parsing_data,
4016 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4017 } else {
4018 u16 global_data = 0;
4019 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4020 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4021 /* Set PBD in checksum offload case */
4022 if (xmit_type & XMIT_CSUM)
4023 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4025 SET_FLAG(global_data,
4026 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4027 pbd_e1x->global_data |= cpu_to_le16(global_data);
4028 }
4030 /* Setup the data pointer of the first BD of the packet */
4031 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4032 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4033 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4034 pkt_size = tx_start_bd->nbytes;
4036 DP(NETIF_MSG_TX_QUEUED,
4037 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
4038 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4039 le16_to_cpu(tx_start_bd->nbytes),
4040 tx_start_bd->bd_flags.as_bitfield,
4041 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4043 if (xmit_type & XMIT_GSO) {
4045 DP(NETIF_MSG_TX_QUEUED,
4046 "TSO packet len %d hlen %d total len %d tso size %d\n",
4047 skb->len, hlen, skb_headlen(skb),
4048 skb_shinfo(skb)->gso_size);
4050 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4052 if (unlikely(skb_headlen(skb) > hlen)) {
4053 nbd++;
4054 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4055 &tx_start_bd, hlen,
4056 bd_prod);
4057 }
4058 if (!CHIP_IS_E1x(bp))
4059 pbd_e2_parsing_data |=
4060 (skb_shinfo(skb)->gso_size <<
4061 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4062 ETH_TX_PARSE_BD_E2_LSO_MSS;
4063 else
4064 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4065 }
4067 /* Set the PBD's parsing_data field if not zero
4068 * (for the chips newer than 57711).
4070 if (pbd_e2_parsing_data)
4071 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4073 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4075 /* Handle fragmented skb */
4076 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4077 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4079 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4080 skb_frag_size(frag), DMA_TO_DEVICE);
4081 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4082 unsigned int pkts_compl = 0, bytes_compl = 0;
4084 DP(NETIF_MSG_TX_QUEUED,
4085 "Unable to map page - dropping packet...\n");
4087 /* we need unmap all buffers already mapped
4088 * for this SKB;
4089 * first_bd->nbd need to be properly updated
4090 * before call to bnx2x_free_tx_pkt
4091 */
4092 first_bd->nbd = cpu_to_le16(nbd);
4093 bnx2x_free_tx_pkt(bp, txdata,
4094 TX_BD(txdata->tx_pkt_prod),
4095 &pkts_compl, &bytes_compl);
4096 return NETDEV_TX_OK;
4097 }
4099 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4100 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4101 if (total_pkt_bd == NULL)
4102 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4104 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4105 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4106 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4107 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4110 DP(NETIF_MSG_TX_QUEUED,
4111 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4112 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4113 le16_to_cpu(tx_data_bd->nbytes));
4114 }
4116 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4118 /* update with actual num BDs */
4119 first_bd->nbd = cpu_to_le16(nbd);
4121 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4123 /* now send a tx doorbell, counting the next BD
4124 * if the packet contains or ends with it
4126 if (TX_BD_POFF(bd_prod) < nbd)
4127 nbd++;
4129 /* total_pkt_bytes should be set on the first data BD if
4130 * it's not an LSO packet and there is more than one
4131 * data BD. In this case pkt_size is limited by an MTU value.
4132 * However we prefer to set it for an LSO packet (while we don't
4133 * have to) in order to save some CPU cycles in a non-LSO
4134 * case, which we care much more about.
4135 */
4136 if (total_pkt_bd != NULL)
4137 total_pkt_bd->total_pkt_bytes = pkt_size;
4139 if (pbd_e1x)
4140 DP(NETIF_MSG_TX_QUEUED,
4141 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4142 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4143 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4144 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4145 le16_to_cpu(pbd_e1x->total_hlen_w));
4146 if (pbd_e2)
4147 DP(NETIF_MSG_TX_QUEUED,
4148 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
4150 pbd_e2->data.mac_addr.dst_hi,
4151 pbd_e2->data.mac_addr.dst_mid,
4152 pbd_e2->data.mac_addr.dst_lo,
4153 pbd_e2->data.mac_addr.src_hi,
4154 pbd_e2->data.mac_addr.src_mid,
4155 pbd_e2->data.mac_addr.src_lo,
4156 pbd_e2->parsing_data);
4157 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4159 netdev_tx_sent_queue(txq, skb->len);
4161 skb_tx_timestamp(skb);
4163 txdata->tx_pkt_prod++;
4164 /*
4165 * Make sure that the BD data is updated before updating the producer
4166 * since FW might read the BD right after the producer is updated.
4167 * This is only applicable for weak-ordered memory model archs such
4168 * as IA-64. The following barrier is also mandatory since FW
4169 * assumes packets must have BDs.
4170 */
4171 wmb();
4173 txdata->tx_db.data.prod += nbd;
4174 /* make sure descriptor update is observed by HW */
4175 wmb();
4177 DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
4179 txdata->tx_bd_prod += nbd;
4181 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4182 netif_tx_stop_queue(txq);
4184 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4185 * ordering of set_bit() in netif_tx_stop_queue() and read of
4186 * txdata->tx_pkt_cons
4187 */
4188 smp_mb();
4189 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4190 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4191 netif_tx_wake_queue(txq);
4192 }
4193 txdata->tx_pkt++;
4195 return NETDEV_TX_OK;
4196 }
4198 void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4199 {
4200 int mfw_vn = BP_FW_MB_IDX(bp);
4201 u32 tmp;
4203 /* If the shmem shouldn't affect configuration, reflect */
4204 if (!IS_MF_BD(bp)) {
4205 int i;
4207 for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4208 c2s_map[i] = i;
4209 *c2s_default = 0;
4211 return;
4212 }
4214 tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4215 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4216 c2s_map[0] = tmp & 0xff;
4217 c2s_map[1] = (tmp >> 8) & 0xff;
4218 c2s_map[2] = (tmp >> 16) & 0xff;
4219 c2s_map[3] = (tmp >> 24) & 0xff;
4221 tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4222 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4223 c2s_map[4] = tmp & 0xff;
4224 c2s_map[5] = (tmp >> 8) & 0xff;
4225 c2s_map[6] = (tmp >> 16) & 0xff;
4226 c2s_map[7] = (tmp >> 24) & 0xff;
4228 tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4229 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4230 *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4231 }
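/* After the byte swap above, byte i of the lower/upper map words is the
 * outer (switch) PCP used for inner priority i, and *c2s_default picks the
 * byte of c2s_pcp_map_default that belongs to this MFW function index.
 */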
4233 /**
4234 * bnx2x_setup_tc - routine to configure net_device for multi tc
4235 *
4236 * @dev: net device to configure
4237 * @num_tc: number of traffic classes to enable
4238 *
4239 * callback connected to the ndo_setup_tc function pointer
4240 */
4241 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4242 {
4243 struct bnx2x *bp = netdev_priv(dev);
4244 u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4245 int cos, prio, count, offset;
4247 /* setup tc must be called under rtnl lock */
4248 ASSERT_RTNL();
4250 /* no traffic classes requested. Aborting */
4251 if (!num_tc) {
4252 netdev_reset_tc(dev);
4253 return 0;
4254 }
4256 /* requested to support too many traffic classes */
4257 if (num_tc > bp->max_cos) {
4258 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4259 num_tc, bp->max_cos);
4260 return -EINVAL;
4261 }
4263 /* declare amount of supported traffic classes */
4264 if (netdev_set_num_tc(dev, num_tc)) {
4265 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4269 bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4271 /* configure priority to traffic class mapping */
4272 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4273 int outer_prio = c2s_map[prio];
4275 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4276 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4277 "mapping priority %d to tc %d\n",
4278 outer_prio, bp->prio_to_cos[outer_prio]);
4279 }
4281 /* Use this configuration to differentiate tc0 from other COSes
4282 This can be used for ets or pfc, and save the effort of setting
4283 up a multi class queue disc or negotiating DCBX with a switch
4284 netdev_set_prio_tc_map(dev, 0, 0);
4285 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4286 for (prio = 1; prio < 16; prio++) {
4287 netdev_set_prio_tc_map(dev, prio, 1);
4288 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4291 /* configure traffic class to transmission queue mapping */
4292 for (cos = 0; cos < bp->max_cos; cos++) {
4293 count = BNX2X_NUM_ETH_QUEUES(bp);
4294 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4295 netdev_set_tc_queue(dev, cos, count, offset);
4296 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4297 "mapping tc %d to offset %d count %d\n",
4298 cos, offset, count);
4299 }
4301 return 0;
4302 }
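/* Example: num_tc = 2 on a device with 8 ethernet queues maps tc0 to
 * queues 0-7 (offset 0) and tc1 to queues 8-15 (offset 8), since each CoS
 * owns a full copy of the ethernet queue range.
 */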
4304 int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
4305 void *type_data)
4306 {
4307 struct tc_mqprio_qopt *mqprio = type_data;
4309 if (type != TC_SETUP_QDISC_MQPRIO)
4310 return -EOPNOTSUPP;
4312 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
4314 return bnx2x_setup_tc(dev, mqprio->num_tc);
4315 }
4317 /* called with rtnl_lock */
4318 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4319 {
4320 struct sockaddr *addr = p;
4321 struct bnx2x *bp = netdev_priv(dev);
4322 int rc = 0;
4324 if (!is_valid_ether_addr(addr->sa_data)) {
4325 BNX2X_ERR("Requested MAC address is not valid\n");
4329 if (IS_MF_STORAGE_ONLY(bp)) {
4330 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4334 if (netif_running(dev)) {
4335 rc = bnx2x_set_eth_mac(bp, false);
4336 if (rc)
4337 return rc;
4338 }
4340 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4342 if (netif_running(dev))
4343 rc = bnx2x_set_eth_mac(bp, true);
4345 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4346 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4348 return rc;
4349 }
4351 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4353 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4354 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4359 if (IS_FCOE_IDX(fp_index)) {
4360 memset(sb, 0, sizeof(union host_hc_status_block));
4361 fp->status_blk_mapping = 0;
4362 } else {
4363 /* status blocks */
4364 if (!CHIP_IS_E1x(bp))
4365 BNX2X_PCI_FREE(sb->e2_sb,
4366 bnx2x_fp(bp, fp_index,
4367 status_blk_mapping),
4368 sizeof(struct host_hc_status_block_e2));
4369 else
4370 BNX2X_PCI_FREE(sb->e1x_sb,
4371 bnx2x_fp(bp, fp_index,
4372 status_blk_mapping),
4373 sizeof(struct host_hc_status_block_e1x));
4374 }
4376 /* Rx */
4377 if (!skip_rx_queue(bp, fp_index)) {
4378 bnx2x_free_rx_bds(fp);
4380 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4381 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4382 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4383 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4384 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4386 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4387 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4388 sizeof(struct eth_fast_path_rx_cqe) *
4389 NUM_RCQ_BD);
4391 /* SGE ring */
4392 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4393 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4394 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4395 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4396 }
4398 /* Tx */
4399 if (!skip_tx_queue(bp, fp_index)) {
4400 /* fastpath tx rings: tx_buf tx_desc */
4401 for_each_cos_in_tx_queue(fp, cos) {
4402 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4404 DP(NETIF_MSG_IFDOWN,
4405 "freeing tx memory of fp %d cos %d cid %d\n",
4406 fp_index, cos, txdata->cid);
4408 BNX2X_FREE(txdata->tx_buf_ring);
4409 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4410 txdata->tx_desc_mapping,
4411 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4414 /* end of fastpath */
4417 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4420 for_each_cnic_queue(bp, i)
4421 bnx2x_free_fp_mem_at(bp, i);
4424 void bnx2x_free_fp_mem(struct bnx2x *bp)
4427 for_each_eth_queue(bp, i)
4428 bnx2x_free_fp_mem_at(bp, i);
4431 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4433 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4434 if (!CHIP_IS_E1x(bp)) {
4435 bnx2x_fp(bp, index, sb_index_values) =
4436 (__le16 *)status_blk.e2_sb->sb.index_values;
4437 bnx2x_fp(bp, index, sb_running_index) =
4438 (__le16 *)status_blk.e2_sb->sb.running_index;
4440 bnx2x_fp(bp, index, sb_index_values) =
4441 (__le16 *)status_blk.e1x_sb->sb.index_values;
4442 bnx2x_fp(bp, index, sb_running_index) =
4443 (__le16 *)status_blk.e1x_sb->sb.running_index;
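/* Editorial note: these shortcuts let the fastpath read status block
 * indices through a single pair of pointers regardless of whether the
 * chip uses the e1x or the e2 status block layout, sparing a
 * CHIP_IS_E1x() check on the hot path.
 */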
4447 /* Returns the number of actually allocated BDs */
4448 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4451 struct bnx2x *bp = fp->bp;
4452 u16 ring_prod, cqe_ring_prod;
4453 int i, failure_cnt = 0;
4455 fp->rx_comp_cons = 0;
4456 cqe_ring_prod = ring_prod = 0;
4458 /* This routine is called only during fp init so
4459 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4461 for (i = 0; i < rx_ring_size; i++) {
4462 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4466 ring_prod = NEXT_RX_IDX(ring_prod);
4467 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4468 WARN_ON(ring_prod <= (i - failure_cnt));
4472 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4473 i - failure_cnt, fp->index);
4475 fp->rx_bd_prod = ring_prod;
4476 /* Limit the CQE producer by the CQE ring size */
4477 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4480 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4482 return i - failure_cnt;
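/* Editorial sketch: the producers above advance via NEXT_RX_IDX() and
 * NEXT_RCQ_IDX() rather than a plain increment because the final
 * descriptors of each ring page are reserved for a next-page pointer and
 * must be skipped. A naive walk such as
 *
 *	ring_prod = (ring_prod + 1) % NUM_RX_BD;
 *
 * would eventually hand the FW a next-page entry as though it were a
 * real BD.
 */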
4485 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4489 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4490 struct eth_rx_cqe_next_page *nextpg;
4492 nextpg = (struct eth_rx_cqe_next_page *)
4493 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4495 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4496 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4498 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4499 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
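/* Editorial illustration (matches the loop above): the last CQE of page i
 * carries the DMA address of page (i % NUM_RCQ_RINGS), so the final page
 * chains back to page 0 and the completion queue forms a circular list:
 *
 *	dma_addr_t next = fp->rx_comp_mapping +
 *			  BCM_PAGE_SIZE * (i % NUM_RCQ_RINGS);
 */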
4503 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4505 union host_hc_status_block *sb;
4506 struct bnx2x_fastpath *fp = &bp->fp[index];
4509 int rx_ring_size = 0;
4511 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4512 rx_ring_size = MIN_RX_SIZE_NONTPA;
4513 bp->rx_ring_size = rx_ring_size;
4514 } else if (!bp->rx_ring_size) {
4515 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4517 if (CHIP_IS_E3(bp)) {
4518 u32 cfg = SHMEM_RD(bp,
4519 dev_info.port_hw_config[BP_PORT(bp)].
4522 /* Decrease ring size for 1G functions */
4523 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4524 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4528 /* allocate at least the number of buffers required by FW */
4529 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4530 MIN_RX_SIZE_TPA, rx_ring_size);
4532 bp->rx_ring_size = rx_ring_size;
4533 } else /* if rx_ring_size specified - use it */
4534 rx_ring_size = bp->rx_ring_size;
4536 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
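	/* Worked example (editorial; the numbers are illustrative): with
	 * TPA enabled and eight RX queues, rx_ring_size starts at
	 * MAX_RX_AVAIL / 8 and is then raised to MIN_RX_SIZE_TPA if that
	 * division yields fewer buffers than the FW minimum.
	 */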
4539 sb = &bnx2x_fp(bp, index, status_blk);
4541 if (!IS_FCOE_IDX(index)) {
4543 if (!CHIP_IS_E1x(bp)) {
4544 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4545 sizeof(struct host_hc_status_block_e2));
4549 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4550 sizeof(struct host_hc_status_block_e1x));
4556 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4557 * set shortcuts for it.
4559 if (!IS_FCOE_IDX(index))
4560 set_sb_shortcuts(bp, index);
4563 if (!skip_tx_queue(bp, index)) {
4564 /* fastpath tx rings: tx_buf tx_desc */
4565 for_each_cos_in_tx_queue(fp, cos) {
4566 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4569 "allocating tx memory of fp %d cos %d\n",
4572 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4573 sizeof(struct sw_tx_bd),
4575 if (!txdata->tx_buf_ring)
4577 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4578 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4579 if (!txdata->tx_desc_ring)
4585 if (!skip_rx_queue(bp, index)) {
4586 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4587 bnx2x_fp(bp, index, rx_buf_ring) =
4588 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4589 if (!bnx2x_fp(bp, index, rx_buf_ring))
4591 bnx2x_fp(bp, index, rx_desc_ring) =
4592 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4593 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4594 if (!bnx2x_fp(bp, index, rx_desc_ring))
4597 /* Seed all CQEs with 1s */
4598 bnx2x_fp(bp, index, rx_comp_ring) =
4599 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4600 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4601 if (!bnx2x_fp(bp, index, rx_comp_ring))
4605 bnx2x_fp(bp, index, rx_page_ring) =
4606 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4608 if (!bnx2x_fp(bp, index, rx_page_ring))
4610 bnx2x_fp(bp, index, rx_sge_ring) =
4611 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4612 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4613 if (!bnx2x_fp(bp, index, rx_sge_ring))
4616 bnx2x_set_next_page_rx_bd(fp);
4619 bnx2x_set_next_page_rx_cq(fp);
4622 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4623 if (ring_size < rx_ring_size)
4629 /* handles low memory cases */
4631 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4633 /* FW will drop all packets if the queue is not big enough.
4634 * In these cases we disable the queue.
4635 * Min size is different for OOO, TPA and non-TPA queues.
4637 if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4638 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4639 /* release memory allocated for this queue */
4640 bnx2x_free_fp_mem_at(bp, index);
4646 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4650 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4651 /* we will fail the load process instead of marking
4659 static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4663 /* 1. Allocate FP for leading - fatal if error
4664 * 2. Allocate RSS - fix number of queues if error
4668 if (bnx2x_alloc_fp_mem_at(bp, 0))
4672 for_each_nondefault_eth_queue(bp, i)
4673 if (bnx2x_alloc_fp_mem_at(bp, i))
4676 /* handle memory failures */
4677 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4678 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4681 bnx2x_shrink_eth_fp(bp, delta);
4682 if (CNIC_SUPPORT(bp))
4683 /* move non-eth FPs next to the last eth FP;
4684 * must be done in that order:
4685 * FCOE_IDX < FWD_IDX < OOO_IDX
4688 /* move FCoE fp even if NO_FCOE_FLAG is on */
4689 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4690 bp->num_ethernet_queues -= delta;
4691 bp->num_queues = bp->num_ethernet_queues +
4692 bp->num_cnic_queues;
4693 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4694 bp->num_queues + delta, bp->num_queues);
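		/* Editorial example: if 2 of 8 ETH queues failed to
		 * allocate, delta == 2, the FCoE fastpath (when present)
		 * moves from index 8 to index 6, and num_ethernet_queues
		 * drops from 8 to 6.
		 */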
4700 void bnx2x_free_mem_bp(struct bnx2x *bp)
4704 for (i = 0; i < bp->fp_array_size; i++)
4705 kfree(bp->fp[i].tpa_info);
4708 kfree(bp->fp_stats);
4709 kfree(bp->bnx2x_txq);
4710 kfree(bp->msix_table);
4714 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4716 struct bnx2x_fastpath *fp;
4717 struct msix_entry *tbl;
4718 struct bnx2x_ilt *ilt;
4719 int msix_table_size = 0;
4720 int fp_array_size, txq_array_size;
4724 * The biggest MSI-X table we might need is the maximum number of fast
4725 * path IGU SBs plus the default SB (for PF only).
4727 msix_table_size = bp->igu_sb_cnt;
4730 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4732 /* fp array: RSS plus CNIC related L2 queues */
4733 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4734 bp->fp_array_size = fp_array_size;
4735 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4737 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4740 for (i = 0; i < bp->fp_array_size; i++) {
4742 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4743 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4744 if (!(fp[i].tpa_info))
4750 /* allocate sp objs */
4751 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4756 /* allocate fp_stats */
4757 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4762 /* Allocate memory for the transmission queues array */
4764 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4765 BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
4767 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4773 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4776 bp->msix_table = tbl;
4779 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4786 bnx2x_free_mem_bp(bp);
4790 int bnx2x_reload_if_running(struct net_device *dev)
4792 struct bnx2x *bp = netdev_priv(dev);
4794 if (unlikely(!netif_running(dev)))
4797 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4798 return bnx2x_nic_load(bp, LOAD_NORMAL);
4801 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4803 u32 sel_phy_idx = 0;
4804 if (bp->link_params.num_phys <= 1)
4807 if (bp->link_vars.link_up) {
4808 sel_phy_idx = EXT_PHY1;
4809 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4810 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4811 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4812 sel_phy_idx = EXT_PHY2;
4815 switch (bnx2x_phy_selection(&bp->link_params)) {
4816 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4817 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4818 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4819 sel_phy_idx = EXT_PHY1;
4821 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4822 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4823 sel_phy_idx = EXT_PHY2;
4830 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4832 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4834 * The selected, activated PHY is always the post-swap one (in case PHY
4835 * swapping is enabled). So when swapping is enabled, we need to reverse
4839 if (bp->link_params.multi_phy_config &
4840 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4841 if (sel_phy_idx == EXT_PHY1)
4842 sel_phy_idx = EXT_PHY2;
4843 else if (sel_phy_idx == EXT_PHY2)
4844 sel_phy_idx = EXT_PHY1;
4846 return LINK_CONFIG_IDX(sel_phy_idx);
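/* Editorial note: with PORT_HW_CFG_PHY_SWAPPED_ENABLED set, the mapping
 * above simply exchanges the two external PHY indices,
 *
 *	EXT_PHY1 <-> EXT_PHY2
 *
 * while the identity mapping applies when swapping is disabled.
 */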
4849 #ifdef NETDEV_FCOE_WWNN
4850 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4852 struct bnx2x *bp = netdev_priv(dev);
4853 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4856 case NETDEV_FCOE_WWNN:
4857 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4858 cp->fcoe_wwn_node_name_lo);
4860 case NETDEV_FCOE_WWPN:
4861 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4862 cp->fcoe_wwn_port_name_lo);
4865 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
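/* Editorial note: HILO_U64() is assumed to assemble the 64-bit WWN from
 * its two 32-bit shmem halves, i.e. roughly
 *
 *	wwn = ((u64)hi << 32) | lo;
 */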
4873 /* called with rtnl_lock */
4874 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4876 struct bnx2x *bp = netdev_priv(dev);
4878 if (pci_num_vf(bp->pdev)) {
4879 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4883 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4884 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4888 /* This does not race with packet allocation
4889 * because the actual alloc size is
4890 * only updated as part of load
4894 if (!bnx2x_mtu_allows_gro(new_mtu))
4895 dev->features &= ~NETIF_F_GRO_HW;
4897 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4898 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4900 return bnx2x_reload_if_running(dev);
4903 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4904 netdev_features_t features)
4906 struct bnx2x *bp = netdev_priv(dev);
4908 if (pci_num_vf(bp->pdev)) {
4909 netdev_features_t changed = dev->features ^ features;
4911 /* Revert the requested changes in features if they
4912 * would require internal reload of PF in bnx2x_set_features().
4914 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4915 features &= ~NETIF_F_RXCSUM;
4916 features |= dev->features & NETIF_F_RXCSUM;
4919 if (changed & NETIF_F_LOOPBACK) {
4920 features &= ~NETIF_F_LOOPBACK;
4921 features |= dev->features & NETIF_F_LOOPBACK;
4925 /* TPA requires Rx CSUM offloading */
4926 if (!(features & NETIF_F_RXCSUM))
4927 features &= ~NETIF_F_LRO;
4929 if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
4930 features &= ~NETIF_F_GRO_HW;
4931 if (features & NETIF_F_GRO_HW)
4932 features &= ~NETIF_F_LRO;
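	/* Editorial note: the fix-ups above are order dependent: losing
	 * RXCSUM drops LRO, losing GRO (or an MTU too large for GRO)
	 * drops hardware GRO, and hardware GRO in turn excludes LRO.
	 */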
4937 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4939 struct bnx2x *bp = netdev_priv(dev);
4940 netdev_features_t changes = features ^ dev->features;
4941 bool bnx2x_reload = false;
4944 /* VFs or non-SRIOV PFs should be able to change the loopback feature */
4945 if (!pci_num_vf(bp->pdev)) {
4946 if (features & NETIF_F_LOOPBACK) {
4947 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4948 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4949 bnx2x_reload = true;
4952 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4953 bp->link_params.loopback_mode = LOOPBACK_NONE;
4954 bnx2x_reload = true;
4959 /* Don't care about GRO changes */
4960 changes &= ~NETIF_F_GRO;
4963 bnx2x_reload = true;
4966 if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4967 dev->features = features;
4968 rc = bnx2x_reload_if_running(dev);
4971 /* else: bnx2x_nic_load() will be called at end of recovery */
4977 void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue)
4979 struct bnx2x *bp = netdev_priv(dev);
4981 /* We want the dump information logged,
4982 * but calling bnx2x_panic() would kill all chances of recovery.
4985 #ifndef BNX2X_STOP_ON_ERROR
4986 bnx2x_panic_dump(bp, false);
4991 /* This allows the netif to be shut down gracefully before resetting */
4992 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4995 static int __maybe_unused bnx2x_suspend(struct device *dev_d)
4997 struct pci_dev *pdev = to_pci_dev(dev_d);
4998 struct net_device *dev = pci_get_drvdata(pdev);
5002 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5005 bp = netdev_priv(dev);
5009 if (!netif_running(dev)) {
5014 netif_device_detach(dev);
5016 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
5023 static int __maybe_unused bnx2x_resume(struct device *dev_d)
5025 struct pci_dev *pdev = to_pci_dev(dev_d);
5026 struct net_device *dev = pci_get_drvdata(pdev);
5031 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5034 bp = netdev_priv(dev);
5036 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
5037 BNX2X_ERR("Handling parity error recovery. Try again later\n");
5043 if (!netif_running(dev)) {
5048 netif_device_attach(dev);
5050 rc = bnx2x_nic_load(bp, LOAD_OPEN);
5057 SIMPLE_DEV_PM_OPS(bnx2x_pm_ops, bnx2x_suspend, bnx2x_resume);
5059 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5063 BNX2X_ERR("bad context pointer %p\n", cxt);
5067 /* ustorm cxt validation */
5068 cxt->ustorm_ag_context.cdu_usage =
5069 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5070 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5071 /* xcontext validation */
5072 cxt->xstorm_ag_context.cdu_reserved =
5073 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5074 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5077 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5078 u8 fw_sb_id, u8 sb_index,
5081 u32 addr = BAR_CSTRORM_INTMEM +
5082 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5083 REG_WR8(bp, addr, ticks);
5085 "port %x fw_sb_id %d sb_index %d ticks %d\n",
5086 port, fw_sb_id, sb_index, ticks);
5089 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5090 u16 fw_sb_id, u8 sb_index,
5093 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5094 u32 addr = BAR_CSTRORM_INTMEM +
5095 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
5096 u8 flags = REG_RD8(bp, addr);
5098 flags &= ~HC_INDEX_DATA_HC_ENABLED;
5099 flags |= enable_flag;
5100 REG_WR8(bp, addr, flags);
5102 "port %x fw_sb_id %d sb_index %d disable %d\n",
5103 port, fw_sb_id, sb_index, disable);
5106 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5107 u8 sb_index, u8 disable, u16 usec)
5109 int port = BP_PORT(bp);
5110 u8 ticks = usec / BNX2X_BTR;
5112 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5114 disable = disable ? 1 : (usec ? 0 : 1);
5115 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
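/* Worked example (editorial; assumes BNX2X_BTR is the HC timer tick
 * granularity in usec): a 48 usec coalescing timeout programs
 * 48 / BNX2X_BTR ticks, and a zero timeout forces 'disable' to 1 so the
 * index is switched off even when the caller asked for it enabled.
 */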
5118 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5121 smp_mb__before_atomic();
5122 set_bit(flag, &bp->sp_rtnl_state);
5123 smp_mb__after_atomic();
5124 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5126 schedule_delayed_work(&bp->sp_rtnl_task, 0);
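/* Editorial note: the smp_mb__before_atomic()/smp_mb__after_atomic()
 * pair around set_bit() follows the usual kernel pattern - it orders the
 * flag update against surrounding accesses so the sp_rtnl task, possibly
 * running on another CPU, is guaranteed to observe the bit once the
 * delayed work fires.
 */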