2 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following conditions are met:
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following disclaimer.
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
32 #include <linux/skbuff.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/if_vlan.h>
37 #include <linux/tcp.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/slab.h>
40 #include <linux/prefetch.h>
46 #include "firmware_exports.h"
47 #include "cxgb3_offload.h"
51 #define SGE_RX_SM_BUF_SIZE 1536
53 #define SGE_RX_COPY_THRES 256
54 #define SGE_RX_PULL_LEN 128
56 #define SGE_PG_RSVD SMP_CACHE_BYTES
58 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
59 * It must be a divisor of PAGE_SIZE. If set to 0, FL0 will use sk_buffs instead of page chunks.
62 #define FL0_PG_CHUNK_SIZE 2048
63 #define FL0_PG_ORDER 0
64 #define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
65 #define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
66 #define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
67 #define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
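/*
 * Worked sizing example (illustrative, assuming a 4 KB PAGE_SIZE): FL0
 * carves two 2 KB chunks out of each order-0 page, while FL1 uses
 * FL1_PG_CHUNK_SIZE = 8192 with FL1_PG_ORDER = 1, i.e. one 8 KB chunk
 * per 8 KB allocation. Larger pages flip FL1 to 16 KB chunks at order 0.
 */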
69 #define SGE_RX_DROP_THRES 16
70 #define RX_RECLAIM_PERIOD (HZ/4)
73 * Max number of Rx buffers we replenish at a time.
75 #define MAX_RX_REFILL 16U
77 * Period of the Tx buffer reclaim timer. This timer does not need to run
78 * frequently as Tx buffers are usually reclaimed by new Tx packets.
80 #define TX_RECLAIM_PERIOD (HZ / 4)
81 #define TX_RECLAIM_TIMER_CHUNK 64U
82 #define TX_RECLAIM_CHUNK 16U
84 /* WR size in bytes */
85 #define WR_LEN (WR_FLITS * 8)
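/*
 * Illustrative arithmetic (WR_FLITS value assumed for the example): a
 * flit is 8 bytes, so a work request of WR_FLITS flits occupies
 * WR_FLITS * 8 bytes; if WR_FLITS were 15, WR_LEN would be 120 bytes.
 */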
88 * Types of Tx queues in each queue set. Order here matters, do not change.
90 enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
92 /* Values for sge_txq.flags */
94 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
95 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
99 __be64 flit[TX_DESC_FLITS];
109 struct tx_sw_desc { /* SW state per Tx descriptor */
111 u8 eop; /* set if last descriptor for packet */
112 u8 addr_idx; /* buffer index of first SGL entry in descriptor */
113 u8 fragidx; /* first page fragment associated with descriptor */
114 s8 sflit; /* start flit of first SGL entry in descriptor */
117 struct rx_sw_desc { /* SW state per Rx descriptor */
120 struct fl_pg_chunk pg_chunk;
122 DEFINE_DMA_UNMAP_ADDR(dma_addr);
125 struct rsp_desc { /* response queue descriptor */
126 struct rss_header rss_hdr;
129 struct_group(immediate,
136 * Holds unmapping information for Tx packets that need deferred unmapping.
137 * This structure lives at skb->head and must be allocated by callers.
139 struct deferred_unmap_info {
140 struct pci_dev *pdev;
141 dma_addr_t addr[MAX_SKB_FRAGS + 1];
145 * Maps a number of flits to the number of Tx descriptors that can hold them.
148 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
150 * HW allows up to 4 descriptors to be combined into a WR.
152 static u8 flit_desc_map[] = {
154 #if SGE_NUM_GENBITS == 1
155 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
156 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
157 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
158 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
159 #elif SGE_NUM_GENBITS == 2
160 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
161 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
162 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
163 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
164 #else
165 # error "SGE_NUM_GENBITS must be 1 or 2"
169 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
171 return container_of(q, struct sge_qset, rspq);
174 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
176 return container_of(q, struct sge_qset, txq[qidx]);
180 * refill_rspq - replenish an SGE response queue
181 * @adapter: the adapter
182 * @q: the response queue to replenish
183 * @credits: how many new responses to make available
185 * Replenishes a response queue by making the supplied number of responses available to HW.
188 static inline void refill_rspq(struct adapter *adapter,
189 const struct sge_rspq *q, unsigned int credits)
192 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
193 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
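/*
 * Sketch of a typical caller pattern (assumed, not shown in this
 * listing): the response-processing loop accumulates consumed entries
 * and returns them in one shot, e.g.
 *
 *	refill_rspq(adap, q, q->credits);
 *	q->credits = 0;
 */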
197 * need_skb_unmap - does the platform need unmapping of sk_buffs?
199 * Returns true if the platform needs sk_buff unmapping. Because the result
200 * is a compile-time constant, the compiler optimizes away the unmapping code when it is false.
202 static inline int need_skb_unmap(void)
204 #ifdef CONFIG_NEED_DMA_MAP_STATE
212 * unmap_skb - unmap a packet main body and its page fragments
214 * @q: the Tx queue containing Tx descriptors for the packet
215 * @cidx: index of Tx descriptor
216 * @pdev: the PCI device
218 * Unmap the main body of an sk_buff and its page fragments, if any.
219 * Because of the fairly complicated structure of our SGLs and the desire
220 * to conserve space for metadata, the information necessary to unmap an
221 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
222 * descriptors (the physical addresses of the various data buffers), and
223 * the SW descriptor state (assorted indices). The send functions
224 * initialize the indices for the first packet descriptor so we can unmap
225 * the buffers held in the first Tx descriptor here, and we have enough
226 * information at this point to set the state for the next Tx descriptor.
228 * Note that it is possible to clean up the first descriptor of a packet
229 * before the send routines have written the next descriptors, but this
230 * race does not cause any problem. We just end up writing the unmapping
231 * info for the descriptor first.
233 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
234 unsigned int cidx, struct pci_dev *pdev)
236 const struct sg_ent *sgp;
237 struct tx_sw_desc *d = &q->sdesc[cidx];
238 int nfrags, frag_idx, curflit, j = d->addr_idx;
240 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
241 frag_idx = d->fragidx;
243 if (frag_idx == 0 && skb_headlen(skb)) {
244 dma_unmap_single(&pdev->dev, be64_to_cpu(sgp->addr[0]),
245 skb_headlen(skb), DMA_TO_DEVICE);
249 curflit = d->sflit + 1 + j;
250 nfrags = skb_shinfo(skb)->nr_frags;
252 while (frag_idx < nfrags && curflit < WR_FLITS) {
253 dma_unmap_page(&pdev->dev, be64_to_cpu(sgp->addr[j]),
254 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
265 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
266 d = cidx + 1 == q->size ? q->sdesc : d + 1;
267 d->fragidx = frag_idx;
269 d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
274 * free_tx_desc - reclaims Tx descriptors and their buffers
275 * @adapter: the adapter
276 * @q: the Tx queue to reclaim descriptors from
277 * @n: the number of descriptors to reclaim
279 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
280 * Tx buffers. Called with the Tx queue lock held.
282 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
285 struct tx_sw_desc *d;
286 struct pci_dev *pdev = adapter->pdev;
287 unsigned int cidx = q->cidx;
289 const int need_unmap = need_skb_unmap() &&
290 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
294 if (d->skb) { /* an SGL is present */
296 unmap_skb(d->skb, q, cidx, pdev);
298 dev_consume_skb_any(d->skb);
303 if (++cidx == q->size) {
312 * reclaim_completed_tx - reclaims completed Tx descriptors
313 * @adapter: the adapter
314 * @q: the Tx queue to reclaim completed descriptors from
315 * @chunk: maximum number of descriptors to reclaim
317 * Reclaims Tx descriptors that the SGE has indicated it has processed,
318 * and frees the associated buffers if possible. Called with the Tx queue locked.
321 static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
325 unsigned int reclaim = q->processed - q->cleaned;
327 reclaim = min(chunk, reclaim);
329 free_tx_desc(adapter, q, reclaim);
330 q->cleaned += reclaim;
331 q->in_use -= reclaim;
333 return q->processed - q->cleaned;
337 * should_restart_tx - are there enough resources to restart a Tx queue?
340 * Checks if there are enough descriptors to restart a suspended Tx queue.
342 static inline int should_restart_tx(const struct sge_txq *q)
344 unsigned int r = q->processed - q->cleaned;
346 return q->in_use - r < (q->size >> 1);
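/*
 * Example with assumed numbers: for a 1024-entry queue, a suspended Tx
 * queue is considered restartable only once the descriptors still in
 * use, minus those already processed but not yet cleaned, drop below
 * 512 (half the ring).
 */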
349 static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
350 struct rx_sw_desc *d)
352 if (q->use_pages && d->pg_chunk.page) {
353 (*d->pg_chunk.p_cnt)--;
354 if (!*d->pg_chunk.p_cnt)
355 dma_unmap_page(&pdev->dev, d->pg_chunk.mapping,
356 q->alloc_size, DMA_FROM_DEVICE);
358 put_page(d->pg_chunk.page);
359 d->pg_chunk.page = NULL;
361 dma_unmap_single(&pdev->dev, dma_unmap_addr(d, dma_addr),
362 q->buf_size, DMA_FROM_DEVICE);
369 * free_rx_bufs - free the Rx buffers on an SGE free list
370 * @pdev: the PCI device associated with the adapter
371 * @q: the SGE free list to clean up
373 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
374 * this queue should be stopped before calling this function.
376 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
378 unsigned int cidx = q->cidx;
380 while (q->credits--) {
381 struct rx_sw_desc *d = &q->sdesc[cidx];
384 clear_rx_desc(pdev, q, d);
385 if (++cidx == q->size)
389 if (q->pg_chunk.page) {
390 __free_pages(q->pg_chunk.page, q->order);
391 q->pg_chunk.page = NULL;
396 * add_one_rx_buf - add a packet buffer to a free-buffer list
397 * @va: buffer start VA
398 * @len: the buffer length
399 * @d: the HW Rx descriptor to write
400 * @sd: the SW Rx descriptor to write
401 * @gen: the generation bit value
402 * @pdev: the PCI device associated with the adapter
404 * Add a buffer of the given length to the supplied HW and SW Rx descriptors.
407 static inline int add_one_rx_buf(void *va, unsigned int len,
408 struct rx_desc *d, struct rx_sw_desc *sd,
409 unsigned int gen, struct pci_dev *pdev)
413 mapping = dma_map_single(&pdev->dev, va, len, DMA_FROM_DEVICE);
414 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
417 dma_unmap_addr_set(sd, dma_addr, mapping);
419 d->addr_lo = cpu_to_be32(mapping);
420 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
422 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
423 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
427 static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
430 d->addr_lo = cpu_to_be32(mapping);
431 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
433 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
434 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
438 static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
439 struct rx_sw_desc *sd, gfp_t gfp,
442 if (!q->pg_chunk.page) {
445 q->pg_chunk.page = alloc_pages(gfp, order);
446 if (unlikely(!q->pg_chunk.page))
448 q->pg_chunk.va = page_address(q->pg_chunk.page);
449 q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
451 q->pg_chunk.offset = 0;
452 mapping = dma_map_page(&adapter->pdev->dev, q->pg_chunk.page,
453 0, q->alloc_size, DMA_FROM_DEVICE);
454 if (unlikely(dma_mapping_error(&adapter->pdev->dev, mapping))) {
455 __free_pages(q->pg_chunk.page, order);
456 q->pg_chunk.page = NULL;
459 q->pg_chunk.mapping = mapping;
461 sd->pg_chunk = q->pg_chunk;
463 prefetch(sd->pg_chunk.p_cnt);
465 q->pg_chunk.offset += q->buf_size;
466 if (q->pg_chunk.offset == (PAGE_SIZE << order))
467 q->pg_chunk.page = NULL;
469 q->pg_chunk.va += q->buf_size;
470 get_page(q->pg_chunk.page);
473 if (sd->pg_chunk.offset == 0)
474 *sd->pg_chunk.p_cnt = 1;
476 *sd->pg_chunk.p_cnt += 1;
481 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
483 if (q->pend_cred >= q->credits / 4) {
486 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
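/*
 * Illustrative effect of the batching check above: with 512 credits
 * currently on the list, the free-list doorbell is rung only once at
 * least 128 newly added buffers are pending, so refills reach HW in
 * bulk rather than one buffer at a time.
 */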
491 * refill_fl - refill an SGE free-buffer list
493 * @q: the free-list to refill
494 * @n: the number of new buffers to allocate
495 * @gfp: the gfp flags for allocating new buffers
497 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
498 * allocated with the supplied gfp flags. The caller must ensure that
499 * @n does not exceed the queue's capacity.
501 static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
503 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
504 struct rx_desc *d = &q->desc[q->pidx];
505 unsigned int count = 0;
512 if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
514 nomem: q->alloc_failed++;
517 mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
518 dma_unmap_addr_set(sd, dma_addr, mapping);
520 add_one_rx_chunk(mapping, d, q->gen);
521 dma_sync_single_for_device(&adap->pdev->dev, mapping,
522 q->buf_size - SGE_PG_RSVD,
527 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
532 buf_start = skb->data;
533 err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
536 clear_rx_desc(adap->pdev, q, sd);
543 if (++q->pidx == q->size) {
553 q->pend_cred += count;
559 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
561 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
562 GFP_ATOMIC | __GFP_COMP);
566 * recycle_rx_buf - recycle a receive buffer
568 * @q: the SGE free list
569 * @idx: index of buffer to recycle
571 * Recycles the specified buffer on the given free list by adding it at
572 * the next available slot on the list.
574 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
577 struct rx_desc *from = &q->desc[idx];
578 struct rx_desc *to = &q->desc[q->pidx];
580 q->sdesc[q->pidx] = q->sdesc[idx];
581 to->addr_lo = from->addr_lo; /* already big endian */
582 to->addr_hi = from->addr_hi; /* likewise */
584 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
585 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
587 if (++q->pidx == q->size) {
598 * alloc_ring - allocate resources for an SGE descriptor ring
599 * @pdev: the PCI device
600 * @nelem: the number of descriptors
601 * @elem_size: the size of each descriptor
602 * @sw_size: the size of the SW state associated with each ring element
603 * @phys: the physical address of the allocated ring
604 * @metadata: address of the array holding the SW state for the ring
606 * Allocates resources for an SGE descriptor ring, such as Tx queues,
607 * free buffer lists, or response queues. Each SGE ring requires
608 * space for its HW descriptors plus, optionally, space for the SW state
609 * associated with each HW entry (the metadata). The function returns
610 * three values: the virtual address for the HW ring (the return value
611 * of the function), the physical address of the HW ring, and the address of the SW ring.
614 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
615 size_t sw_size, dma_addr_t * phys, void *metadata)
617 size_t len = nelem * elem_size;
619 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
623 if (sw_size && metadata) {
624 s = kcalloc(nelem, sw_size, GFP_KERNEL);
627 dma_free_coherent(&pdev->dev, len, p, *phys);
630 *(void **)metadata = s;
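/*
 * Hypothetical usage sketch (queue-set setup is outside this listing):
 * a free-list ring and its SW state could be allocated as
 *
 *	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
 *				   sizeof(struct rx_desc),
 *				   sizeof(struct rx_sw_desc),
 *				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
 *
 * where the HW ring is the return value, the bus address comes back
 * through @phys, and the SW descriptor array through @metadata.
 */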
636 * t3_reset_qset - reset an SGE qset
639 * Reset the qset structure.
640 * The NAPI structure is preserved in the event of
641 * the qset's reincarnation, for example during EEH recovery.
643 static void t3_reset_qset(struct sge_qset *q)
646 !(q->adap->flags & NAPI_INIT)) {
647 memset(q, 0, sizeof(*q));
652 memset(&q->rspq, 0, sizeof(q->rspq));
653 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
654 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
656 q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
657 q->rx_reclaim_timer.function = NULL;
659 napi_free_frags(&q->napi);
664 * t3_free_qset - free the resources of an SGE queue set
665 * @adapter: the adapter owning the queue set
668 * Release the HW and SW resources associated with an SGE queue set, such
669 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
670 * queue set must be quiesced prior to calling this.
672 static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
675 struct pci_dev *pdev = adapter->pdev;
677 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
679 spin_lock_irq(&adapter->sge.reg_lock);
680 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
681 spin_unlock_irq(&adapter->sge.reg_lock);
682 free_rx_bufs(pdev, &q->fl[i]);
683 kfree(q->fl[i].sdesc);
684 dma_free_coherent(&pdev->dev,
686 sizeof(struct rx_desc), q->fl[i].desc,
690 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
691 if (q->txq[i].desc) {
692 spin_lock_irq(&adapter->sge.reg_lock);
693 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
694 spin_unlock_irq(&adapter->sge.reg_lock);
695 if (q->txq[i].sdesc) {
696 free_tx_desc(adapter, &q->txq[i],
698 kfree(q->txq[i].sdesc);
700 dma_free_coherent(&pdev->dev,
702 sizeof(struct tx_desc),
703 q->txq[i].desc, q->txq[i].phys_addr);
704 __skb_queue_purge(&q->txq[i].sendq);
708 spin_lock_irq(&adapter->sge.reg_lock);
709 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
710 spin_unlock_irq(&adapter->sge.reg_lock);
711 dma_free_coherent(&pdev->dev,
712 q->rspq.size * sizeof(struct rsp_desc),
713 q->rspq.desc, q->rspq.phys_addr);
720 * init_qset_cntxt - initialize an SGE queue set context info
722 * @id: the queue set id
724 * Initializes the TIDs and context ids for the queues of a queue set.
726 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
728 qs->rspq.cntxt_id = id;
729 qs->fl[0].cntxt_id = 2 * id;
730 qs->fl[1].cntxt_id = 2 * id + 1;
731 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
732 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
733 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
734 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
735 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
739 * sgl_len - calculates the size of an SGL of the given capacity
740 * @n: the number of SGL entries
742 * Calculates the number of flits needed for a scatter/gather list that
743 * can hold the given number of entries.
745 static inline unsigned int sgl_len(unsigned int n)
747 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
748 return (3 * n) / 2 + (n & 1);
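/*
 * Worked example: each sg_ent carries two address/length pairs in 3
 * flits, so n = 3 entries need 3 * 3 / 2 + 1 = 5 flits and n = 4
 * entries need exactly 6.
 */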
752 * flits_to_desc - returns the num of Tx descriptors for the given flits
753 * @n: the number of flits
755 * Calculates the number of Tx descriptors needed for the supplied number
758 static inline unsigned int flits_to_desc(unsigned int n)
760 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
761 return flit_desc_map[n];
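/*
 * Worked example of the formula above (WR_FLITS value assumed): with
 * WR_FLITS = 15, desc = 1 + (flits - 2) / (WR_FLITS - 1), so a 30-flit
 * request needs 1 + 28 / 14 = 3 Tx descriptors.
 */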
765 * get_packet - return the next ingress packet buffer from a free list
766 * @adap: the adapter that received the packet
767 * @fl: the SGE free list holding the packet
768 * @len: the packet length including any SGE padding
769 * @drop_thres: # of remaining buffers before we start dropping packets
771 * Get the next packet from a free list and complete setup of the
772 * sk_buff. If the packet is small we make a copy and recycle the
773 * original buffer, otherwise we use the original buffer itself. If a
774 * positive drop threshold is supplied packets are dropped and their
775 * buffers recycled if (a) the number of remaining buffers is under the
776 * threshold and the packet is too big to copy, or (b) the packet should
777 * be copied but there is no memory for the copy.
779 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
780 unsigned int len, unsigned int drop_thres)
782 struct sk_buff *skb = NULL;
783 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
785 prefetch(sd->skb->data);
788 if (len <= SGE_RX_COPY_THRES) {
789 skb = alloc_skb(len, GFP_ATOMIC);
790 if (likely(skb != NULL)) {
792 dma_sync_single_for_cpu(&adap->pdev->dev,
793 dma_unmap_addr(sd, dma_addr),
794 len, DMA_FROM_DEVICE);
795 memcpy(skb->data, sd->skb->data, len);
796 dma_sync_single_for_device(&adap->pdev->dev,
797 dma_unmap_addr(sd, dma_addr),
798 len, DMA_FROM_DEVICE);
799 } else if (!drop_thres)
802 recycle_rx_buf(adap, fl, fl->cidx);
806 if (unlikely(fl->credits < drop_thres) &&
807 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
808 GFP_ATOMIC | __GFP_COMP) == 0)
812 dma_unmap_single(&adap->pdev->dev, dma_unmap_addr(sd, dma_addr),
813 fl->buf_size, DMA_FROM_DEVICE);
816 __refill_fl(adap, fl);
821 * get_packet_pg - return the next ingress packet buffer from a free list
822 * @adap: the adapter that received the packet
823 * @fl: the SGE free list holding the packet
825 * @len: the packet length including any SGE padding
826 * @drop_thres: # of remaining buffers before we start dropping packets
828 * Get the next packet from a free list populated with page chunks.
829 * If the packet is small we make a copy and recycle the original buffer,
830 * otherwise we attach the original buffer as a page fragment to a fresh
831 * sk_buff. If a positive drop threshold is supplied packets are dropped
832 * and their buffers recycled if (a) the number of remaining buffers is
833 * under the threshold and the packet is too big to copy, or (b) there's no memory to copy the packet into.
836 * Note: this function is similar to @get_packet but deals with Rx buffers
837 * that are page chunks rather than sk_buffs.
839 static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
840 struct sge_rspq *q, unsigned int len,
841 unsigned int drop_thres)
843 struct sk_buff *newskb, *skb;
844 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
846 dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
848 newskb = skb = q->pg_skb;
849 if (!skb && (len <= SGE_RX_COPY_THRES)) {
850 newskb = alloc_skb(len, GFP_ATOMIC);
851 if (likely(newskb != NULL)) {
852 __skb_put(newskb, len);
853 dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr,
854 len, DMA_FROM_DEVICE);
855 memcpy(newskb->data, sd->pg_chunk.va, len);
856 dma_sync_single_for_device(&adap->pdev->dev, dma_addr,
857 len, DMA_FROM_DEVICE);
858 } else if (!drop_thres)
862 recycle_rx_buf(adap, fl, fl->cidx);
867 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
870 prefetch(sd->pg_chunk.p_cnt);
873 newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
875 if (unlikely(!newskb)) {
881 dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr, len,
883 (*sd->pg_chunk.p_cnt)--;
884 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
885 dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping,
886 fl->alloc_size, DMA_FROM_DEVICE);
888 __skb_put(newskb, SGE_RX_PULL_LEN);
889 memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
890 skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
891 sd->pg_chunk.offset + SGE_RX_PULL_LEN,
892 len - SGE_RX_PULL_LEN);
894 newskb->data_len = len - SGE_RX_PULL_LEN;
895 newskb->truesize += newskb->data_len;
897 skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
899 sd->pg_chunk.offset, len);
901 newskb->data_len += len;
902 newskb->truesize += len;
907 * We do not refill FLs here, we let the caller do it to overlap a prefetch.
914 * get_imm_packet - return the next ingress packet buffer from a response
915 * @resp: the response descriptor containing the packet data
917 * Return a packet containing the immediate data of the given response.
919 static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
921 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
924 __skb_put(skb, IMMED_PKT_SIZE);
925 BUILD_BUG_ON(IMMED_PKT_SIZE != sizeof(resp->immediate));
926 skb_copy_to_linear_data(skb, &resp->immediate, IMMED_PKT_SIZE);
932 * calc_tx_descs - calculate the number of Tx descriptors for a packet
935 * Returns the number of Tx descriptors needed for the given Ethernet
936 * packet. Ethernet packets require the addition of WR and CPL headers.
938 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
942 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
945 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
946 if (skb_shinfo(skb)->gso_size)
948 return flits_to_desc(flits);
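/*
 * Example (fragment counts are illustrative): a non-TSO packet with a
 * linear head and 2 page fragments needs sgl_len(3) + 2 = 7 flits; a
 * TSO packet adds one more flit for the LSO header before the total is
 * converted with flits_to_desc().
 */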
951 /* map_skb - map a packet main body and its page fragments
952 * @pdev: the PCI device
954 * @addr: placeholder to save the mapped addresses
956 * Map the main body of an sk_buff and its page fragments, if any.
958 static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
961 const skb_frag_t *fp, *end;
962 const struct skb_shared_info *si;
964 if (skb_headlen(skb)) {
965 *addr = dma_map_single(&pdev->dev, skb->data,
966 skb_headlen(skb), DMA_TO_DEVICE);
967 if (dma_mapping_error(&pdev->dev, *addr))
972 si = skb_shinfo(skb);
973 end = &si->frags[si->nr_frags];
975 for (fp = si->frags; fp < end; fp++) {
976 *addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
978 if (dma_mapping_error(&pdev->dev, *addr))
985 while (fp-- > si->frags)
986 dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
989 dma_unmap_single(&pdev->dev, addr[-1], skb_headlen(skb),
996 * write_sgl - populate a scatter/gather list for a packet
998 * @sgp: the SGL to populate
999 * @start: start address of skb main body data to include in the SGL
1000 * @len: length of skb main body data to include in the SGL
1001 * @addr: the list of the mapped addresses
1003 * Copies the scatter/gather list for the buffers that make up a packet
1004 * and returns the SGL size in 8-byte words. The caller must size the SGL appropriately.
1007 static inline unsigned int write_sgl(const struct sk_buff *skb,
1008 struct sg_ent *sgp, unsigned char *start,
1009 unsigned int len, const dma_addr_t *addr)
1011 unsigned int i, j = 0, k = 0, nfrags;
1014 sgp->len[0] = cpu_to_be32(len);
1015 sgp->addr[j++] = cpu_to_be64(addr[k++]);
1018 nfrags = skb_shinfo(skb)->nr_frags;
1019 for (i = 0; i < nfrags; i++) {
1020 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1022 sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
1023 sgp->addr[j] = cpu_to_be64(addr[k++]);
1030 return ((nfrags + (len != 0)) * 3) / 2 + j;
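/*
 * Example: a packet with a linear head and 2 fragments emits 3 SGL
 * entries, i.e. ((2 + 1) * 3) / 2 + 1 = 5 flits, matching sgl_len(3).
 */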
1034 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
1035 * @adap: the adapter
1038 * Ring the doorbell if a Tx queue is asleep. There is a natural race
1039 * where the HW goes to sleep just after we checked; in that case the
1040 * interrupt handler will detect the outstanding TX packet
1041 * and ring the doorbell for us.
1043 * When GTS is disabled we unconditionally ring the doorbell.
1045 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
1048 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1049 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1050 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1051 t3_write_reg(adap, A_SG_KDOORBELL,
1052 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1055 wmb(); /* write descriptors before telling HW */
1056 t3_write_reg(adap, A_SG_KDOORBELL,
1057 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1061 static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
1063 #if SGE_NUM_GENBITS == 2
1064 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
1069 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
1070 * @ndesc: number of Tx descriptors spanned by the SGL
1071 * @skb: the packet corresponding to the WR
1072 * @d: first Tx descriptor to be written
1073 * @pidx: index of the first of the above descriptors
1074 * @q: the SGE Tx queue
1076 * @flits: number of flits to the start of the SGL in the first descriptor
1077 * @sgl_flits: the SGL size in flits
1078 * @gen: the Tx descriptor generation
1079 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
1080 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
1082 * Write a work request header and an associated SGL. If the SGL is
1083 * small enough to fit into one Tx descriptor it has already been written
1084 * and we just need to write the WR header. Otherwise we distribute the
1085 * SGL across the number of descriptors it spans.
1087 static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
1088 struct tx_desc *d, unsigned int pidx,
1089 const struct sge_txq *q,
1090 const struct sg_ent *sgl,
1091 unsigned int flits, unsigned int sgl_flits,
1092 unsigned int gen, __be32 wr_hi,
1095 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
1096 struct tx_sw_desc *sd = &q->sdesc[pidx];
1099 if (need_skb_unmap()) {
1105 if (likely(ndesc == 1)) {
1107 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1108 V_WR_SGLSFLT(flits)) | wr_hi;
1110 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
1111 V_WR_GEN(gen)) | wr_lo;
1114 unsigned int ogen = gen;
1115 const u64 *fp = (const u64 *)sgl;
1116 struct work_request_hdr *wp = wrp;
1118 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1119 V_WR_SGLSFLT(flits)) | wr_hi;
1122 unsigned int avail = WR_FLITS - flits;
1124 if (avail > sgl_flits)
1126 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
1136 if (++pidx == q->size) {
1144 wrp = (struct work_request_hdr *)d;
1145 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
1146 V_WR_SGLSFLT(1)) | wr_hi;
1147 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
1149 V_WR_GEN(gen)) | wr_lo;
1154 wrp->wr_hi |= htonl(F_WR_EOP);
1156 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1157 wr_gen2((struct tx_desc *)wp, ogen);
1158 WARN_ON(ndesc != 0);
1163 * write_tx_pkt_wr - write a TX_PKT work request
1164 * @adap: the adapter
1165 * @skb: the packet to send
1166 * @pi: the egress interface
1167 * @pidx: index of the first Tx descriptor to write
1168 * @gen: the generation value to use
1170 * @ndesc: number of descriptors the packet will occupy
1171 * @compl: the value of the COMPL bit to use
1174 * Generate a TX_PKT work request to send the supplied packet.
1176 static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1177 const struct port_info *pi,
1178 unsigned int pidx, unsigned int gen,
1179 struct sge_txq *q, unsigned int ndesc,
1180 unsigned int compl, const dma_addr_t *addr)
1182 unsigned int flits, sgl_flits, cntrl, tso_info;
1183 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1184 struct tx_desc *d = &q->desc[pidx];
1185 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1187 cpl->len = htonl(skb->len);
1188 cntrl = V_TXPKT_INTF(pi->port_id);
1190 if (skb_vlan_tag_present(skb))
1191 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(skb_vlan_tag_get(skb));
1193 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1196 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1199 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1200 hdr->cntrl = htonl(cntrl);
1201 eth_type = skb_network_offset(skb) == ETH_HLEN ?
1202 CPL_ETH_II : CPL_ETH_II_VLAN;
1203 tso_info |= V_LSO_ETH_TYPE(eth_type) |
1204 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
1205 V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
1206 hdr->lso_info = htonl(tso_info);
1209 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1210 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
1211 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1212 cpl->cntrl = htonl(cntrl);
1214 if (skb->len <= WR_LEN - sizeof(*cpl)) {
1215 q->sdesc[pidx].skb = NULL;
1217 skb_copy_from_linear_data(skb, &d->flit[2],
1220 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1222 flits = (skb->len + 7) / 8 + 2;
1223 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1224 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1225 | F_WR_SOP | F_WR_EOP | compl);
1227 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1228 V_WR_TID(q->token));
1230 dev_consume_skb_any(skb);
1237 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1238 sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
1240 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1241 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1242 htonl(V_WR_TID(q->token)));
1245 static inline void t3_stop_tx_queue(struct netdev_queue *txq,
1246 struct sge_qset *qs, struct sge_txq *q)
1248 netif_tx_stop_queue(txq);
1249 set_bit(TXQ_ETH, &qs->txq_stopped);
1254 * t3_eth_xmit - add a packet to the Ethernet Tx queue
1256 * @dev: the egress net device
1258 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
1260 netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1263 unsigned int ndesc, pidx, credits, gen, compl;
1264 const struct port_info *pi = netdev_priv(dev);
1265 struct adapter *adap = pi->adapter;
1266 struct netdev_queue *txq;
1267 struct sge_qset *qs;
1269 dma_addr_t addr[MAX_SKB_FRAGS + 1];
1272 * The chip's minimum packet length is 9 octets, but play it safe and reject
1273 * anything shorter than an Ethernet header.
1275 if (unlikely(skb->len < ETH_HLEN)) {
1276 dev_kfree_skb_any(skb);
1277 return NETDEV_TX_OK;
1280 qidx = skb_get_queue_mapping(skb);
1282 q = &qs->txq[TXQ_ETH];
1283 txq = netdev_get_tx_queue(dev, qidx);
1285 reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1287 credits = q->size - q->in_use;
1288 ndesc = calc_tx_descs(skb);
1290 if (unlikely(credits < ndesc)) {
1291 t3_stop_tx_queue(txq, qs, q);
1292 dev_err(&adap->pdev->dev,
1293 "%s: Tx ring %u full while queue awake!\n",
1294 dev->name, q->cntxt_id & 7);
1295 return NETDEV_TX_BUSY;
1298 /* Check if the Ethernet packet can be sent as immediate data; if not, map it */
1299 if (skb->len > (WR_LEN - sizeof(struct cpl_tx_pkt))) {
1300 if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
1302 return NETDEV_TX_OK;
1307 if (unlikely(credits - ndesc < q->stop_thres)) {
1308 t3_stop_tx_queue(txq, qs, q);
1310 if (should_restart_tx(q) &&
1311 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1313 netif_tx_start_queue(txq);
1318 q->unacked += ndesc;
1319 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1323 if (q->pidx >= q->size) {
1328 /* update port statistics */
1329 if (skb->ip_summed == CHECKSUM_PARTIAL)
1330 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1331 if (skb_shinfo(skb)->gso_size)
1332 qs->port_stats[SGE_PSTAT_TSO]++;
1333 if (skb_vlan_tag_present(skb))
1334 qs->port_stats[SGE_PSTAT_VLANINS]++;
1337 * We do not use Tx completion interrupts to free DMAd Tx packets.
1338 * This is good for performance but means that we rely on new Tx
1339 * packets arriving to run the destructors of completed packets,
1340 * which open up space in their sockets' send queues. Sometimes
1341 * we do not get such new packets causing Tx to stall. A single
1342 * UDP transmitter is a good example of this situation. We have
1343 * a clean up timer that periodically reclaims completed packets
1344 * but it doesn't run often enough (nor do we want it to) to prevent
1345 * lengthy stalls. A solution to this problem is to run the
1346 * destructor early, after the packet is queued but before it's DMAd.
1347 * A downside is that we lie to socket memory accounting, but the amount
1348 * of extra memory is reasonable (limited by the number of Tx
1349 * descriptors), the packets do actually get freed quickly by new
1350 * packets almost always, and for protocols like TCP that wait for
1351 * acks to really free up the data the extra memory is even less.
1352 * On the positive side we run the destructors on the sending CPU
1353 * rather than on a potentially different completing CPU, usually a
1354 * good thing. We also run them without holding our Tx queue lock,
1355 * unlike what reclaim_completed_tx() would otherwise do.
1357 * Run the destructor before telling the DMA engine about the packet
1358 * to make sure it doesn't complete and get freed prematurely.
1360 if (likely(!skb_shared(skb)))
1363 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
1364 check_ring_tx_db(adap, q);
1365 return NETDEV_TX_OK;
1369 * write_imm - write a packet into a Tx descriptor as immediate data
1370 * @d: the Tx descriptor to write
1372 * @len: the length of packet data to write as immediate data
1373 * @gen: the generation bit value to write
1375 * Writes a packet as immediate data into a Tx descriptor. The packet
1376 * contains a work request at its beginning. We must write the packet
1377 * carefully so the SGE doesn't read it accidentally before it's written in its entirety.
1380 static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1381 unsigned int len, unsigned int gen)
1383 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1384 struct work_request_hdr *to = (struct work_request_hdr *)d;
1386 if (likely(!skb->data_len))
1387 memcpy(&to[1], &from[1], len - sizeof(*from));
1389 skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1391 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1392 V_WR_BCNTLFLT(len & 7));
1394 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1395 V_WR_LEN((len + 7) / 8));
1401 * check_desc_avail - check descriptor availability on a send queue
1402 * @adap: the adapter
1403 * @q: the send queue
1404 * @skb: the packet needing the descriptors
1405 * @ndesc: the number of Tx descriptors needed
1406 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1408 * Checks if the requested number of Tx descriptors is available on an
1409 * SGE send queue. If the queue is already suspended or not enough
1410 * descriptors are available the packet is queued for later transmission.
1411 * Must be called with the Tx queue locked.
1413 * Returns 0 if enough descriptors are available, 1 if there aren't
1414 * enough descriptors and the packet has been queued, and 2 if the caller
1415 * needs to retry because there weren't enough descriptors at the
1416 * beginning of the call but some freed up in the meantime.
1418 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1419 struct sk_buff *skb, unsigned int ndesc,
1422 if (unlikely(!skb_queue_empty(&q->sendq))) {
1423 addq_exit:__skb_queue_tail(&q->sendq, skb);
1426 if (unlikely(q->size - q->in_use < ndesc)) {
1427 struct sge_qset *qs = txq_to_qset(q, qid);
1429 set_bit(qid, &qs->txq_stopped);
1430 smp_mb__after_atomic();
1432 if (should_restart_tx(q) &&
1433 test_and_clear_bit(qid, &qs->txq_stopped))
1443 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1444 * @q: the SGE control Tx queue
1446 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1447 * that send only immediate data (presently just the control queues) and
1448 * thus do not have any sk_buffs to release.
1450 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1452 unsigned int reclaim = q->processed - q->cleaned;
1454 q->in_use -= reclaim;
1455 q->cleaned += reclaim;
1458 static inline int immediate(const struct sk_buff *skb)
1460 return skb->len <= WR_LEN;
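/*
 * Example (WR_LEN value assumed): if WR_LEN were 120 bytes, a 96-byte
 * control message would be copied into a descriptor as immediate data,
 * while a larger offload payload would be described by an SGL instead.
 */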
1464 * ctrl_xmit - send a packet through an SGE control Tx queue
1465 * @adap: the adapter
1466 * @q: the control queue
1469 * Send a packet through an SGE control Tx queue. Packets sent through
1470 * a control queue must fit entirely as immediate data in a single Tx
1471 * descriptor and have no page fragments.
1473 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1474 struct sk_buff *skb)
1477 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1479 if (unlikely(!immediate(skb))) {
1482 return NET_XMIT_SUCCESS;
1485 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1486 wrp->wr_lo = htonl(V_WR_TID(q->token));
1488 spin_lock(&q->lock);
1489 again:reclaim_completed_tx_imm(q);
1491 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1492 if (unlikely(ret)) {
1494 spin_unlock(&q->lock);
1500 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1503 if (++q->pidx >= q->size) {
1507 spin_unlock(&q->lock);
1509 t3_write_reg(adap, A_SG_KDOORBELL,
1510 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1511 return NET_XMIT_SUCCESS;
1515 * restart_ctrlq - restart a suspended control queue
1516 * @w: pointer to the work associated with this handler
1518 * Resumes transmission on a suspended Tx control queue.
1520 static void restart_ctrlq(struct work_struct *w)
1522 struct sk_buff *skb;
1523 struct sge_qset *qs = container_of(w, struct sge_qset,
1524 txq[TXQ_CTRL].qresume_task);
1525 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1527 spin_lock(&q->lock);
1528 again:reclaim_completed_tx_imm(q);
1530 while (q->in_use < q->size &&
1531 (skb = __skb_dequeue(&q->sendq)) != NULL) {
1533 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1535 if (++q->pidx >= q->size) {
1542 if (!skb_queue_empty(&q->sendq)) {
1543 set_bit(TXQ_CTRL, &qs->txq_stopped);
1544 smp_mb__after_atomic();
1546 if (should_restart_tx(q) &&
1547 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1552 spin_unlock(&q->lock);
1554 t3_write_reg(qs->adap, A_SG_KDOORBELL,
1555 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1559 * Send a management message through control queue 0
1561 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1565 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1572 * deferred_unmap_destructor - unmap a packet when it is freed
1575 * This is the packet destructor used for Tx packets that need to remain
1576 * mapped until they are freed rather than until their Tx descriptors are reclaimed.
1579 static void deferred_unmap_destructor(struct sk_buff *skb)
1582 const dma_addr_t *p;
1583 const struct skb_shared_info *si;
1584 const struct deferred_unmap_info *dui;
1586 dui = (struct deferred_unmap_info *)skb->head;
1589 if (skb_tail_pointer(skb) - skb_transport_header(skb))
1590 dma_unmap_single(&dui->pdev->dev, *p++,
1591 skb_tail_pointer(skb) - skb_transport_header(skb),
1594 si = skb_shinfo(skb);
1595 for (i = 0; i < si->nr_frags; i++)
1596 dma_unmap_page(&dui->pdev->dev, *p++,
1597 skb_frag_size(&si->frags[i]), DMA_TO_DEVICE);
1600 static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1601 const struct sg_ent *sgl, int sgl_flits)
1604 struct deferred_unmap_info *dui;
1606 dui = (struct deferred_unmap_info *)skb->head;
1608 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1609 *p++ = be64_to_cpu(sgl->addr[0]);
1610 *p++ = be64_to_cpu(sgl->addr[1]);
1613 *p = be64_to_cpu(sgl->addr[0]);
1617 * write_ofld_wr - write an offload work request
1618 * @adap: the adapter
1619 * @skb: the packet to send
1621 * @pidx: index of the first Tx descriptor to write
1622 * @gen: the generation value to use
1623 * @ndesc: number of descriptors the packet will occupy
1624 * @addr: the list of mapped DMA addresses for the packet
1626 * Write an offload work request to send the supplied packet. The packet
1627 * data already carry the work request with most fields populated.
1629 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1630 struct sge_txq *q, unsigned int pidx,
1631 unsigned int gen, unsigned int ndesc,
1632 const dma_addr_t *addr)
1634 unsigned int sgl_flits, flits;
1635 struct work_request_hdr *from;
1636 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1637 struct tx_desc *d = &q->desc[pidx];
1639 if (immediate(skb)) {
1640 q->sdesc[pidx].skb = NULL;
1641 write_imm(d, skb, skb->len, gen);
1645 /* Only TX_DATA builds SGLs */
1647 from = (struct work_request_hdr *)skb->data;
1648 memcpy(&d->flit[1], &from[1],
1649 skb_transport_offset(skb) - sizeof(*from));
1651 flits = skb_transport_offset(skb) / 8;
1652 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1653 sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
1654 skb_tail_pointer(skb) - skb_transport_header(skb),
1656 if (need_skb_unmap()) {
1657 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1658 skb->destructor = deferred_unmap_destructor;
1661 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1662 gen, from->wr_hi, from->wr_lo);
1666 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1669 * Returns the number of Tx descriptors needed for the given offload
1670 * packet. These packets are already fully constructed.
1672 static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1674 unsigned int flits, cnt;
1676 if (skb->len <= WR_LEN)
1677 return 1; /* packet fits as immediate data */
1679 flits = skb_transport_offset(skb) / 8; /* headers */
1680 cnt = skb_shinfo(skb)->nr_frags;
1681 if (skb_tail_pointer(skb) != skb_transport_header(skb))
1683 return flits_to_desc(flits + sgl_len(cnt));
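/*
 * Example with assumed sizes: an offload packet with 40 bytes of WR
 * plus headers before the payload (5 flits), 2 page fragments, and a
 * non-empty linear tail builds a 3-entry SGL, so it occupies
 * flits_to_desc(5 + sgl_len(3)) = flits_to_desc(10) descriptors.
 */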
1687 * ofld_xmit - send a packet through an offload queue
1688 * @adap: the adapter
1689 * @q: the Tx offload queue
1692 * Send an offload packet through an SGE offload queue.
1694 static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1695 struct sk_buff *skb)
1698 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1700 spin_lock(&q->lock);
1701 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1703 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1704 if (unlikely(ret)) {
1706 skb->priority = ndesc; /* save for restart */
1707 spin_unlock(&q->lock);
1713 if (!immediate(skb) &&
1714 map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
1715 spin_unlock(&q->lock);
1716 return NET_XMIT_SUCCESS;
1723 if (q->pidx >= q->size) {
1727 spin_unlock(&q->lock);
1729 write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
1730 check_ring_tx_db(adap, q);
1731 return NET_XMIT_SUCCESS;
1735 * restart_offloadq - restart a suspended offload queue
1736 * @w: pointer to the work associated with this handler
1738 * Resumes transmission on a suspended Tx offload queue.
1740 static void restart_offloadq(struct work_struct *w)
1742 struct sk_buff *skb;
1743 struct sge_qset *qs = container_of(w, struct sge_qset,
1744 txq[TXQ_OFLD].qresume_task);
1745 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1746 const struct port_info *pi = netdev_priv(qs->netdev);
1747 struct adapter *adap = pi->adapter;
1748 unsigned int written = 0;
1750 spin_lock(&q->lock);
1751 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1753 while ((skb = skb_peek(&q->sendq)) != NULL) {
1754 unsigned int gen, pidx;
1755 unsigned int ndesc = skb->priority;
1757 if (unlikely(q->size - q->in_use < ndesc)) {
1758 set_bit(TXQ_OFLD, &qs->txq_stopped);
1759 smp_mb__after_atomic();
1761 if (should_restart_tx(q) &&
1762 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1768 if (!immediate(skb) &&
1769 map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
1777 if (q->pidx >= q->size) {
1781 __skb_unlink(skb, &q->sendq);
1782 spin_unlock(&q->lock);
1784 write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
1785 (dma_addr_t *)skb->head);
1786 spin_lock(&q->lock);
1788 spin_unlock(&q->lock);
1791 set_bit(TXQ_RUNNING, &q->flags);
1792 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1795 if (likely(written))
1796 t3_write_reg(adap, A_SG_KDOORBELL,
1797 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1801 * queue_set - return the queue set a packet should use
1804 * Maps a packet to the SGE queue set it should use. The desired queue
1805 * set is carried in bits 1-3 in the packet's priority.
1807 static inline int queue_set(const struct sk_buff *skb)
1809 return skb->priority >> 1;
1813 * is_ctrl_pkt - return whether an offload packet is a control packet
1816 * Determines whether an offload packet should use an OFLD or a CTRL
1817 * Tx queue. This is indicated by bit 0 in the packet's priority.
1819 static inline int is_ctrl_pkt(const struct sk_buff *skb)
1821 return skb->priority & 1;
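/*
 * Example of the priority encoding used by the two helpers above: if a
 * sender sets skb->priority = (qset << 1) | is_ctrl, a value of 5
 * (binary 101) selects queue set 2 and, because bit 0 is set, that
 * set's control Tx queue.
 */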
1825 * t3_offload_tx - send an offload packet
1826 * @tdev: the offload device to send to
1829 * Sends an offload packet. We use the packet priority to select the
1830 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1831 * should be sent as regular or control, bits 1-3 select the queue set.
1833 int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1835 struct adapter *adap = tdev2adap(tdev);
1836 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1838 if (unlikely(is_ctrl_pkt(skb)))
1839 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1841 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1845 * offload_enqueue - add an offload packet to an SGE offload receive queue
1846 * @q: the SGE response queue
1849 * Add a new offload packet to an SGE response queue's offload packet
1850 * queue. If the packet is the first on the queue it schedules the RX
1851 * softirq to process the queue.
1853 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1855 int was_empty = skb_queue_empty(&q->rx_queue);
1857 __skb_queue_tail(&q->rx_queue, skb);
1860 struct sge_qset *qs = rspq_to_qset(q);
1862 napi_schedule(&qs->napi);
1867 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1868 * @tdev: the offload device that will be receiving the packets
1869 * @q: the SGE response queue that assembled the bundle
1870 * @skbs: the partial bundle
1871 * @n: the number of packets in the bundle
1873 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1875 static inline void deliver_partial_bundle(struct t3cdev *tdev,
1877 struct sk_buff *skbs[], int n)
1880 q->offload_bundles++;
1881 tdev->recv(tdev, skbs, n);
1886 * ofld_poll - NAPI handler for offload packets in interrupt mode
1887 * @napi: the network device doing the polling
1888 * @budget: polling budget
1890 * The NAPI handler for offload packets when a response queue is serviced
1891 * by the hard interrupt handler, i.e., when it's operating in non-polling
1892 * mode. Creates small packet batches and sends them through the offload
1893 * receive handler. Batches need to be of modest size as we do prefetches
1894 * on the packets in each.
1896 static int ofld_poll(struct napi_struct *napi, int budget)
1898 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1899 struct sge_rspq *q = &qs->rspq;
1900 struct adapter *adapter = qs->adap;
1903 while (work_done < budget) {
1904 struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1905 struct sk_buff_head queue;
1908 spin_lock_irq(&q->lock);
1909 __skb_queue_head_init(&queue);
1910 skb_queue_splice_init(&q->rx_queue, &queue);
1911 if (skb_queue_empty(&queue)) {
1912 napi_complete_done(napi, work_done);
1913 spin_unlock_irq(&q->lock);
1916 spin_unlock_irq(&q->lock);
1919 skb_queue_walk_safe(&queue, skb, tmp) {
1920 if (work_done >= budget)
1924 __skb_unlink(skb, &queue);
1925 prefetch(skb->data);
1926 skbs[ngathered] = skb;
1927 if (++ngathered == RX_BUNDLE_SIZE) {
1928 q->offload_bundles++;
1929 adapter->tdev.recv(&adapter->tdev, skbs,
1934 if (!skb_queue_empty(&queue)) {
1935 /* splice remaining packets back onto Rx queue */
1936 spin_lock_irq(&q->lock);
1937 skb_queue_splice(&queue, &q->rx_queue);
1938 spin_unlock_irq(&q->lock);
1940 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1947 * rx_offload - process a received offload packet
1948 * @tdev: the offload device receiving the packet
1949 * @rq: the response queue that received the packet
1951 * @rx_gather: a gather list of packets if we are building a bundle
1952 * @gather_idx: index of the next available slot in the bundle
1954 * Process an ingress offload packet and add it to the offload ingress
1955 * queue. Returns the index of the next available slot in the bundle.
1957 static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1958 struct sk_buff *skb, struct sk_buff *rx_gather[],
1959 unsigned int gather_idx)
1961 skb_reset_mac_header(skb);
1962 skb_reset_network_header(skb);
1963 skb_reset_transport_header(skb);
1966 rx_gather[gather_idx++] = skb;
1967 if (gather_idx == RX_BUNDLE_SIZE) {
1968 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1970 rq->offload_bundles++;
1973 offload_enqueue(rq, skb);
1979 * restart_tx - check whether to restart suspended Tx queues
1980 * @qs: the queue set to resume
1982 * Restarts suspended Tx queues of an SGE queue set if they have enough
1983 * free resources to resume operation.
1985 static void restart_tx(struct sge_qset *qs)
1987 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1988 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1989 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1990 qs->txq[TXQ_ETH].restarts++;
1991 if (netif_running(qs->netdev))
1992 netif_tx_wake_queue(qs->tx_q);
1995 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1996 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1997 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1998 qs->txq[TXQ_OFLD].restarts++;
2000 /* The work can be quite lengthy so we use the driver's own workqueue */
2001 queue_work(cxgb3_wq, &qs->txq[TXQ_OFLD].qresume_task);
2003 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
2004 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2005 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2006 qs->txq[TXQ_CTRL].restarts++;
2008 /* The work can be quite lengthy so we use the driver's own workqueue */
2009 queue_work(cxgb3_wq, &qs->txq[TXQ_CTRL].qresume_task);
2014 * cxgb3_arp_process - process an ARP request probing a private IP address
2015 * @pi: the port info
2016 * @skb: the skbuff containing the ARP request
2018 * Check if the ARP request is probing the private IP address
2019 * dedicated to iSCSI, and generate an ARP reply if so.
2021 static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
2023 struct net_device *dev = skb->dev;
2025 unsigned char *arp_ptr;
2032 skb_reset_network_header(skb);
2035 if (arp->ar_op != htons(ARPOP_REQUEST))
2038 arp_ptr = (unsigned char *)(arp + 1);
2040 arp_ptr += dev->addr_len;
2041 memcpy(&sip, arp_ptr, sizeof(sip));
2042 arp_ptr += sizeof(sip);
2043 arp_ptr += dev->addr_len;
2044 memcpy(&tip, arp_ptr, sizeof(tip));
2046 if (tip != pi->iscsi_ipv4addr)
2049 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
2050 pi->iscsic.mac_addr, sha);
2054 static inline int is_arp(struct sk_buff *skb)
2056 return skb->protocol == htons(ETH_P_ARP);
2059 static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
2060 struct sk_buff *skb)
2063 cxgb3_arp_process(pi, skb);
2067 if (pi->iscsic.recv)
2068 pi->iscsic.recv(pi, skb);
2073 * rx_eth - process an ingress ethernet packet
2074 * @adap: the adapter
2075 * @rq: the response queue that received the packet
2078 * @lro: large receive offload
2080 * Process an ingress ethernet packet and deliver it to the stack.
2081 * The padding is 2 if the packet was delivered in an Rx buffer and 0
2082 * if it was immediate data in a response.
2084 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2085 struct sk_buff *skb, int pad, int lro)
2087 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
2088 struct sge_qset *qs = rspq_to_qset(rq);
2089 struct port_info *pi;
2091 skb_pull(skb, sizeof(*p) + pad);
2092 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
2093 pi = netdev_priv(skb->dev);
2094 if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
2095 p->csum == htons(0xffff) && !p->fragment) {
2096 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2097 skb->ip_summed = CHECKSUM_UNNECESSARY;
2099 skb_checksum_none_assert(skb);
2100 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2102 if (p->vlan_valid) {
2103 qs->port_stats[SGE_PSTAT_VLANEX]++;
2104 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
2108 napi_gro_receive(&qs->napi, skb);
2110 if (unlikely(pi->iscsic.flags))
2111 cxgb3_process_iscsi_prov_pack(pi, skb);
2112 netif_receive_skb(skb);
2118 static inline int is_eth_tcp(u32 rss)
2120 return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
2124 * lro_add_page - add a page chunk to an LRO session
2125 * @adap: the adapter
2126 * @qs: the associated queue set
2127 * @fl: the free list containing the page chunk to add
2128 * @len: packet length
2129 * @complete: Indicates the last fragment of a frame
2131 * Add a received packet contained in a page chunk to an existing LRO session.
2134 static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2135 struct sge_fl *fl, int len, int complete)
2137 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2138 struct port_info *pi = netdev_priv(qs->netdev);
2139 struct sk_buff *skb = NULL;
2140 struct cpl_rx_pkt *cpl;
2141 skb_frag_t *rx_frag;
2146 skb = napi_get_frags(&qs->napi);
2152 dma_sync_single_for_cpu(&adap->pdev->dev,
2153 dma_unmap_addr(sd, dma_addr),
2154 fl->buf_size - SGE_PG_RSVD, DMA_FROM_DEVICE);
2156 (*sd->pg_chunk.p_cnt)--;
2157 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
2158 dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping,
2159 fl->alloc_size, DMA_FROM_DEVICE);
2162 put_page(sd->pg_chunk.page);
2168 rx_frag = skb_shinfo(skb)->frags;
2169 nr_frags = skb_shinfo(skb)->nr_frags;
2172 offset = 2 + sizeof(struct cpl_rx_pkt);
2173 cpl = qs->lro_va = sd->pg_chunk.va + 2;
2175 if ((qs->netdev->features & NETIF_F_RXCSUM) &&
2176 cpl->csum_valid && cpl->csum == htons(0xffff)) {
2177 skb->ip_summed = CHECKSUM_UNNECESSARY;
2178 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2180 skb->ip_summed = CHECKSUM_NONE;
2186 rx_frag += nr_frags;
2187 skb_frag_fill_page_desc(rx_frag, sd->pg_chunk.page,
2188 sd->pg_chunk.offset + offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;
	skb_shinfo(skb)->nr_frags++;

	if (!complete)
		return;

	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2200 if (cpl->vlan_valid) {
2201 qs->port_stats[SGE_PSTAT_VLANEX]++;
2202 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
2204 napi_gro_frags(&qs->napi);
2208 * handle_rsp_cntrl_info - handles control information in a response
2209 * @qs: the queue set corresponding to the response
2210 * @flags: the response control flags
2212 * Handles the control information of an SGE response, such as GTS
2213 * indications and completion credits for the queue set's Tx queues.
 * HW coalesces credits; we don't do any extra SW coalescing.
2216 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
2218 unsigned int credits;
2221 if (flags & F_RSPD_TXQ0_GTS)
2222 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2225 credits = G_RSPD_TXQ0_CR(flags);
2227 qs->txq[TXQ_ETH].processed += credits;
2229 credits = G_RSPD_TXQ2_CR(flags);
2231 qs->txq[TXQ_CTRL].processed += credits;
2234 if (flags & F_RSPD_TXQ1_GTS)
2235 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2237 credits = G_RSPD_TXQ1_CR(flags);
2239 qs->txq[TXQ_OFLD].processed += credits;
2243 * check_ring_db - check if we need to ring any doorbells
2244 * @adap: the adapter
2245 * @qs: the queue set whose Tx queues are to be examined
2246 * @sleeping: indicates which Tx queue sent GTS
2248 * Checks if some of a queue set's Tx queues need to ring their doorbells
2249 * to resume transmission after idling while they still have unprocessed
2252 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2253 unsigned int sleeping)
2255 if (sleeping & F_RSPD_TXQ0_GTS) {
2256 struct sge_txq *txq = &qs->txq[TXQ_ETH];
2258 if (txq->cleaned + txq->in_use != txq->processed &&
2259 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2260 set_bit(TXQ_RUNNING, &txq->flags);
2261 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2262 V_EGRCNTX(txq->cntxt_id));
2266 if (sleeping & F_RSPD_TXQ1_GTS) {
2267 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2269 if (txq->cleaned + txq->in_use != txq->processed &&
2270 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2271 set_bit(TXQ_RUNNING, &txq->flags);
2272 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2273 V_EGRCNTX(txq->cntxt_id));
2279 * is_new_response - check if a response is newly written
2280 * @r: the response descriptor
2281 * @q: the response queue
 * Returns true if a response descriptor contains a yet unprocessed response.
2286 static inline int is_new_response(const struct rsp_desc *r,
2287 const struct sge_rspq *q)
2289 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
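/*
 * Response descriptors carry a generation bit that alternates on each pass
 * around the ring; q->gen holds the value expected on the current pass, so
 * stale descriptors left over from the previous lap are not treated as new.
 */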
2292 static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2295 q->rx_recycle_buf = 0;
2298 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2299 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2300 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2301 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2302 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
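/*
 * RSPD_CTRL_MASK covers everything handle_rsp_cntrl_info() cares about:
 * the two GTS indications plus the credit-return counts for the three Tx
 * queues of the set.
 */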
2304 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2305 #define NOMEM_INTR_DELAY 2500
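/* 2500 * 0.1us = 250us before the next interrupt when we are out of memory */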
2308 * process_responses - process responses from an SGE response queue
2309 * @adap: the adapter
2310 * @qs: the queue set to which the response queue belongs
2311 * @budget: how many responses can be processed in this round
2313 * Process responses from an SGE response queue up to the supplied budget.
2314 * Responses include received packets as well as credits and other events
2315 * for the queues that belong to the response queue's queue set.
2316 * A negative budget is effectively unlimited.
2318 * Additionally choose the interrupt holdoff time for the next interrupt
2319 * on this queue. If the system is under memory shortage use a fairly
2320 * long delay to help recovery.
static int process_responses(struct adapter *adap, struct sge_qset *qs,
			     int budget)
{
2325 struct sge_rspq *q = &qs->rspq;
2326 struct rsp_desc *r = &q->desc[q->cidx];
2327 int budget_left = budget;
2328 unsigned int sleeping = 0;
	struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
	int ngathered = 0;
2332 q->next_holdoff = q->holdoff_tmr;
2334 while (likely(budget_left && is_new_response(r, q))) {
2335 int packet_complete, eth, ethpad = 2;
2336 int lro = !!(qs->netdev->features & NETIF_F_GRO);
2337 struct sk_buff *skb = NULL;
		u32 len, flags;
		__be32 rss_hi, rss_lo;
2342 eth = r->rss_hdr.opcode == CPL_RX_PKT;
2343 rss_hi = *(const __be32 *)r;
2344 rss_lo = r->rss_hdr.rss_hash_val;
2345 flags = ntohl(r->flags);
2347 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
			skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
			if (!skb)
				goto no_mem;

			__skb_put_data(skb, r, AN_PKT_SIZE);
2353 skb->data[0] = CPL_ASYNC_NOTIF;
2354 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2356 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2357 skb = get_imm_packet(r);
2358 if (unlikely(!skb)) {
no_mem:
				q->next_holdoff = NOMEM_INTR_DELAY;
				q->nomem++;
				/* consume one credit since we tried */
				budget_left--;
				break;
			}
			q->imm_data++;
			ethpad = 0;
2368 } else if ((len = ntohl(r->len_cq)) != 0) {
2371 lro &= eth && is_eth_tcp(rss_hi);
2373 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2374 if (fl->use_pages) {
2375 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2378 __refill_fl(adap, fl);
				if (lro > 0) {
					lro_add_page(adap, qs, fl,
						     G_RSPD_LEN(len),
						     flags & F_RSPD_EOP);
					goto next_fl;
				}
				skb = get_packet_pg(adap, fl, q,
						    G_RSPD_LEN(len),
						    eth ? SGE_RX_DROP_THRES : 0);
				q->pg_skb = skb;
			} else
2392 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2393 eth ? SGE_RX_DROP_THRES : 0);
			if (unlikely(!skb)) {
				if (!eth)
					goto no_mem;
				q->rx_drops++;
			} else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
				__skb_pull(skb, 2);
next_fl:
			if (++fl->cidx == fl->size)
				fl->cidx = 0;
2406 if (flags & RSPD_CTRL_MASK) {
2407 sleeping |= flags & RSPD_GTS_MASK;
2408 handle_rsp_cntrl_info(qs, flags);
		r++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->gen ^= 1;
			r = q->desc;
		}
2419 if (++q->credits >= (q->size / 4)) {
			refill_rspq(adap, q, q->credits);
			q->credits = 0;
		}
2424 packet_complete = flags &
2425 (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2426 F_RSPD_ASYNC_NOTIF);
2428 if (skb != NULL && packet_complete) {
2430 rx_eth(adap, q, skb, ethpad, lro);
2433 /* Preserve the RSS info in csum & priority */
				skb->csum = rss_hi;
				skb->priority = rss_lo;
				ngathered = rx_offload(&adap->tdev, q, skb,
						       offload_skbs, ngathered);
2441 if (flags & F_RSPD_EOP)
2442 clear_rspq_bufstate(q);
2447 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
	if (sleeping)
		check_ring_db(adap, qs, sleeping);
2452 smp_mb(); /* commit Tx queue .processed updates */
	if (unlikely(qs->txq_stopped != 0))
		restart_tx(qs);

	budget -= budget_left;
	return budget;
}
2460 static inline int is_pure_response(const struct rsp_desc *r)
2462 __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2464 return (n | r->len_cq) == 0;
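/*
 * A pure response carries no packet data: neither the async-notification nor
 * the immediate-data flag is set and the length/completion-queue field is 0.
 */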
2468 * napi_rx_handler - the NAPI handler for Rx processing
2469 * @napi: the napi instance
2470 * @budget: how many packets we can process in this round
2472 * Handler for new data events when using NAPI.
2474 static int napi_rx_handler(struct napi_struct *napi, int budget)
2476 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2477 struct adapter *adap = qs->adap;
2478 int work_done = process_responses(adap, qs, budget);
2480 if (likely(work_done < budget)) {
2481 napi_complete_done(napi, work_done);
2484 * Because we don't atomically flush the following
2485 * write it is possible that in very rare cases it can
2486 * reach the device in a way that races with a new
2487 * response being written plus an error interrupt
2488 * causing the NAPI interrupt handler below to return
2489 * unhandled status to the OS. To protect against
2490 * this would require flushing the write and doing
2491 * both the write and the flush with interrupts off.
2492 * Way too expensive and unjustifiable given the
2493 * rarity of the race.
2495 * The race cannot happen at all with MSI-X.
2497 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2498 V_NEWTIMER(qs->rspq.next_holdoff) |
2499 V_NEWINDEX(qs->rspq.cidx));
2505 * process_pure_responses - process pure responses from a response queue
2506 * @adap: the adapter
2507 * @qs: the queue set owning the response queue
2508 * @r: the first pure response to process
2510 * A simpler version of process_responses() that handles only pure (i.e.,
 * non data-carrying) responses. Such responses are too lightweight to
2512 * justify calling a softirq under NAPI, so we handle them specially in
2513 * the interrupt handler. The function is called with a pointer to a
2514 * response, which the caller must ensure is a valid pure response.
2516 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
				  struct rsp_desc *r)
{
2521 struct sge_rspq *q = &qs->rspq;
2522 unsigned int sleeping = 0;
	do {
		u32 flags = ntohl(r->flags);
2528 if (unlikely(++q->cidx == q->size)) {
2535 if (flags & RSPD_CTRL_MASK) {
2536 sleeping |= flags & RSPD_GTS_MASK;
2537 handle_rsp_cntrl_info(qs, flags);
2541 if (++q->credits >= (q->size / 4)) {
			refill_rspq(adap, q, q->credits);
			q->credits = 0;
		}
		if (!is_new_response(r, q))
			break;
2548 } while (is_pure_response(r));
	if (sleeping)
		check_ring_db(adap, qs, sleeping);
2553 smp_mb(); /* commit Tx queue .processed updates */
	if (unlikely(qs->txq_stopped != 0))
		restart_tx(qs);

	return is_new_response(r, q);
}
2561 * handle_responses - decide what to do with new responses in NAPI mode
2562 * @adap: the adapter
2563 * @q: the response queue
2565 * This is used by the NAPI interrupt handlers to decide what to do with
2566 * new SGE responses. If there are no new responses it returns -1. If
2567 * there are new responses and they are pure (i.e., non-data carrying)
2568 * it handles them straight in hard interrupt context as they are very
2569 * cheap and don't deliver any packets. Finally, if there are any data
2570 * signaling responses it schedules the NAPI handler. Returns 1 if it
2571 * schedules NAPI, 0 if all new responses were pure.
2573 * The caller must ascertain NAPI is not already running.
2575 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2577 struct sge_qset *qs = rspq_to_qset(q);
2578 struct rsp_desc *r = &q->desc[q->cidx];
	if (!is_new_response(r, q))
		return -1;
2583 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2584 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
		return 0;
	}
	napi_schedule(&qs->napi);
	return 1;
}
2593 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2594 * (i.e., response queue serviced in hard interrupt).
2596 static irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2598 struct sge_qset *qs = cookie;
2599 struct adapter *adap = qs->adap;
2600 struct sge_rspq *q = &qs->rspq;
2602 spin_lock(&q->lock);
2603 if (process_responses(adap, qs, -1) == 0)
2604 q->unhandled_irqs++;
2605 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2606 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}
2612 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2613 * (i.e., response queue serviced by NAPI polling).
2615 static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2617 struct sge_qset *qs = cookie;
2618 struct sge_rspq *q = &qs->rspq;
2620 spin_lock(&q->lock);
2622 if (handle_responses(qs->adap, q) < 0)
2623 q->unhandled_irqs++;
	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}
2629 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2630 * SGE response queues as well as error and other async events as they all use
2631 * the same MSI vector. We use one SGE response queue per port in this mode
2632 * and protect all response queues with queue 0's lock.
2634 static irqreturn_t t3_intr_msi(int irq, void *cookie)
2636 int new_packets = 0;
2637 struct adapter *adap = cookie;
2638 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2640 spin_lock(&q->lock);
2642 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2643 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
			     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
		new_packets = 1;
	}
2648 if (adap->params.nports == 2 &&
2649 process_responses(adap, &adap->sge.qs[1], -1)) {
2650 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2652 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2653 V_NEWTIMER(q1->next_holdoff) |
			     V_NEWINDEX(q1->cidx));
		new_packets = 1;
	}
2658 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2659 q->unhandled_irqs++;
	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}
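/*
 * Schedule a queue set's NAPI instance if its response queue holds a new
 * response. Returns non-zero only if a new response was pending and NAPI
 * was actually scheduled.
 */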
2665 static int rspq_check_napi(struct sge_qset *qs)
2667 struct sge_rspq *q = &qs->rspq;
2669 return is_new_response(&q->desc[q->cidx], q) && napi_schedule(&qs->napi);
2673 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2674 * by NAPI polling). Handles data events from SGE response queues as well as
2675 * error and other async events as they all use the same MSI vector. We use
2676 * one SGE response queue per port in this mode and protect all response
2677 * queues with queue 0's lock.
2679 static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
	int new_packets;
	struct adapter *adap = cookie;
2683 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2685 spin_lock(&q->lock);
2687 new_packets = rspq_check_napi(&adap->sge.qs[0]);
2688 if (adap->params.nports == 2)
2689 new_packets += rspq_check_napi(&adap->sge.qs[1]);
2690 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2691 q->unhandled_irqs++;
	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}
2698 * A helper function that processes responses and issues GTS.
2700 static inline int process_responses_gts(struct adapter *adap,
2701 struct sge_rspq *rq)
2705 work = process_responses(adap, rspq_to_qset(rq), -1);
2706 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2707 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2712 * The legacy INTx interrupt handler. This needs to handle data events from
2713 * SGE response queues as well as error and other async events as they all use
2714 * the same interrupt pin. We use one SGE response queue per port in this mode
2715 * and protect all response queues with queue 0's lock.
2717 static irqreturn_t t3_intr(int irq, void *cookie)
2719 int work_done, w0, w1;
2720 struct adapter *adap = cookie;
2721 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2722 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2724 spin_lock(&q0->lock);
2726 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2727 w1 = adap->params.nports == 2 &&
2728 is_new_response(&q1->desc[q1->cidx], q1);
2730 if (likely(w0 | w1)) {
2731 t3_write_reg(adap, A_PL_CLI, 0);
2732 t3_read_reg(adap, A_PL_CLI); /* flush */
		if (likely(w0))
			process_responses_gts(adap, q0);

		if (w1)
			process_responses_gts(adap, q1);

		work_done = w0 | w1;
	} else
		work_done = t3_slow_intr_handler(adap);
2744 spin_unlock(&q0->lock);
2745 return IRQ_RETVAL(work_done != 0);
2749 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2750 * Handles data events from SGE response queues as well as error and other
2751 * async events as they all use the same interrupt pin. We use one SGE
 * response queue per port in this mode and protect all response queues with queue 0's lock.
2755 static irqreturn_t t3b_intr(int irq, void *cookie)
2758 struct adapter *adap = cookie;
2759 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2761 t3_write_reg(adap, A_PL_CLI, 0);
2762 map = t3_read_reg(adap, A_SG_DATA_INTR);
	if (unlikely(!map))	/* shared interrupt, most likely */
		return IRQ_NONE;
2767 spin_lock(&q0->lock);
2769 if (unlikely(map & F_ERRINTR))
2770 t3_slow_intr_handler(adap);
2772 if (likely(map & 1))
2773 process_responses_gts(adap, q0);
	if (map & 2)
		process_responses_gts(adap, &adap->sge.qs[1].rspq);
	spin_unlock(&q0->lock);
	return IRQ_HANDLED;
}
2783 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2784 * Handles data events from SGE response queues as well as error and other
2785 * async events as they all use the same interrupt pin. We use one SGE
 * response queue per port in this mode and protect all response queues with queue 0's lock.
2789 static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2792 struct adapter *adap = cookie;
2793 struct sge_qset *qs0 = &adap->sge.qs[0];
2794 struct sge_rspq *q0 = &qs0->rspq;
2796 t3_write_reg(adap, A_PL_CLI, 0);
2797 map = t3_read_reg(adap, A_SG_DATA_INTR);
	if (unlikely(!map))	/* shared interrupt, most likely */
		return IRQ_NONE;
2802 spin_lock(&q0->lock);
2804 if (unlikely(map & F_ERRINTR))
2805 t3_slow_intr_handler(adap);
2807 if (likely(map & 1))
2808 napi_schedule(&qs0->napi);
	if (map & 2)
		napi_schedule(&adap->sge.qs[1].napi);
	spin_unlock(&q0->lock);
	return IRQ_HANDLED;
}
2818 * t3_intr_handler - select the top-level interrupt handler
2819 * @adap: the adapter
2820 * @polling: whether using NAPI to service response queues
2822 * Selects the top-level interrupt handler based on the type of interrupts
 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the response queues.
2826 irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
2828 if (adap->flags & USING_MSIX)
2829 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2830 if (adap->flags & USING_MSI)
2831 return polling ? t3_intr_msi_napi : t3_intr_msi;
2832 if (adap->params.rev > 0)
		return polling ? t3b_intr_napi : t3b_intr;
	return t3_intr;
}
2837 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2838 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2839 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		    F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		    F_HIRCQPARITYERROR)
2842 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
		      F_RSPQDISABLED)
2847 * t3_sge_err_intr_handler - SGE async event interrupt handler
2848 * @adapter: the adapter
2850 * Interrupt handler for SGE asynchronous (non-data) events.
2852 void t3_sge_err_intr_handler(struct adapter *adapter)
	unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
				 ~F_FLEMPTY;
2857 if (status & SGE_PARERR)
2858 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2859 status & SGE_PARERR);
2860 if (status & SGE_FRAMINGERR)
2861 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2862 status & SGE_FRAMINGERR);
2864 if (status & F_RSPQCREDITOVERFOW)
2865 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2867 if (status & F_RSPQDISABLED) {
2868 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
		CH_ALERT(adapter,
			 "packet delivered to disabled response queue "
2872 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2875 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2876 queue_work(cxgb3_wq, &adapter->db_drop_task);
2878 if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
2879 queue_work(cxgb3_wq, &adapter->db_full_task);
2881 if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
2882 queue_work(cxgb3_wq, &adapter->db_empty_task);
2884 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2885 if (status & SGE_FATALERR)
2886 t3_fatal_err(adapter);
2890 * sge_timer_tx - perform periodic maintenance of an SGE qset
2891 * @t: a timer list containing the SGE queue set to maintain
2893 * Runs periodically from a timer to perform maintenance of an SGE queue
2894 * set. It performs two tasks:
2896 * Cleans up any completed Tx descriptors that may still be pending.
2897 * Normal descriptor cleanup happens when new packets are added to a Tx
2898 * queue so this timer is relatively infrequent and does any cleanup only
2899 * if the Tx queue has not seen any new packets in a while. We make a
2900 * best effort attempt to reclaim descriptors, in that we don't wait
2901 * around if we cannot get a queue's lock (which most likely is because
2902 * someone else is queueing new packets and so will also handle the clean
2903 * up). Since control queues use immediate data exclusively we don't
2904 * bother cleaning them up here.
2907 static void sge_timer_tx(struct timer_list *t)
2909 struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);
2910 struct port_info *pi = netdev_priv(qs->netdev);
2911 struct adapter *adap = pi->adapter;
2912 unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
2913 unsigned long next_period;
2915 if (__netif_tx_trylock(qs->tx_q)) {
2916 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2917 TX_RECLAIM_TIMER_CHUNK);
2918 __netif_tx_unlock(qs->tx_q);
2921 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2922 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2923 TX_RECLAIM_TIMER_CHUNK);
2924 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2927 next_period = TX_RECLAIM_PERIOD >>
2928 (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
2929 TX_RECLAIM_TIMER_CHUNK);
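	/*
	 * The busier the queues were, the sooner we run again: every
	 * TX_RECLAIM_TIMER_CHUNK descriptors reclaimed this pass halves
	 * the next reclaim period.
	 */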
2930 mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
2934 * sge_timer_rx - perform periodic maintenance of an SGE qset
2935 * @t: the timer list containing the SGE queue set to maintain
2937 * a) Replenishes Rx queues that have run out due to memory shortage.
2938 * Normally new Rx buffers are added when existing ones are consumed but
2939 * when out of memory a queue can become empty. We try to add only a few
2940 * buffers here, the queue will be replenished fully as these new buffers
2941 * are used up if memory shortage has subsided.
 * b) Return coalesced response queue credits in case a response queue is starved.
2947 static void sge_timer_rx(struct timer_list *t)
2950 struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer);
2951 struct port_info *pi = netdev_priv(qs->netdev);
2952 struct adapter *adap = pi->adapter;
2955 lock = adap->params.rev > 0 ?
2956 &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
2958 if (!spin_trylock_irq(lock))
2961 if (napi_is_scheduled(&qs->napi))
2964 if (adap->params.rev < 4) {
2965 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2967 if (status & (1 << qs->rspq.cntxt_id)) {
2969 if (qs->rspq.credits) {
2971 refill_rspq(adap, &qs->rspq, 1);
2972 qs->rspq.restarted++;
2973 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2974 1 << qs->rspq.cntxt_id);
2979 if (qs->fl[0].credits < qs->fl[0].size)
2980 __refill_fl(adap, &qs->fl[0]);
2981 if (qs->fl[1].credits < qs->fl[1].size)
2982 __refill_fl(adap, &qs->fl[1]);
2985 spin_unlock_irq(lock);
2987 mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
2991 * t3_update_qset_coalesce - update coalescing settings for a queue set
2992 * @qs: the SGE queue set
2993 * @p: new queue set parameters
2995 * Update the coalescing settings for an SGE queue set. Nothing is done
2996 * if the queue set is not initialized yet.
2998 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */
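	/*
	 * coalesce_usecs is converted to the SGE's holdoff units; t3_sge_init()
	 * programs the SG timer tick to 0.1us, so e.g. the default 5us becomes
	 * a holdoff value of 50.
	 */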
3001 qs->rspq.polling = p->polling;
3002 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
3006 * t3_sge_alloc_qset - initialize an SGE queue set
3007 * @adapter: the adapter
3008 * @id: the queue set id
3009 * @nports: how many Ethernet ports will be using this queue set
3010 * @irq_vec_idx: the IRQ vector index for response queue interrupts
3011 * @p: configuration parameters for this queue set
3012 * @ntxq: number of Tx queues for the queue set
3013 * @dev: net device associated with this queue set
3014 * @netdevq: net device TX queue associated with this queue set
3016 * Allocate resources and initialize an SGE queue set. A queue set
3017 * comprises a response queue, two Rx free-buffer queues, and up to 3
3018 * Tx queues. The Tx queues are assigned roles in the order Ethernet
3019 * queue, offload queue, and control queue.
3021 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
3022 int irq_vec_idx, const struct qset_params *p,
3023 int ntxq, struct net_device *dev,
3024 struct netdev_queue *netdevq)
3026 int i, avail, ret = -ENOMEM;
3027 struct sge_qset *q = &adapter->sge.qs[id];
3029 init_qset_cntxt(q, id);
3030 timer_setup(&q->tx_reclaim_timer, sge_timer_tx, 0);
3031 timer_setup(&q->rx_reclaim_timer, sge_timer_rx, 0);
3033 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
3034 sizeof(struct rx_desc),
3035 sizeof(struct rx_sw_desc),
3036 &q->fl[0].phys_addr, &q->fl[0].sdesc);
3040 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
3041 sizeof(struct rx_desc),
3042 sizeof(struct rx_sw_desc),
3043 &q->fl[1].phys_addr, &q->fl[1].sdesc);
3047 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
3048 sizeof(struct rsp_desc), 0,
3049 &q->rspq.phys_addr, NULL);
3053 for (i = 0; i < ntxq; ++i) {
3055 * The control queue always uses immediate data so does not
3056 * need to keep track of any sk_buffs.
3058 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
3060 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
3061 sizeof(struct tx_desc), sz,
3062 &q->txq[i].phys_addr,
		if (!q->txq[i].desc)
			goto err;
3068 q->txq[i].size = p->txq_size[i];
3069 spin_lock_init(&q->txq[i].lock);
3070 skb_queue_head_init(&q->txq[i].sendq);
3073 INIT_WORK(&q->txq[TXQ_OFLD].qresume_task, restart_offloadq);
3074 INIT_WORK(&q->txq[TXQ_CTRL].qresume_task, restart_ctrlq);
3076 q->fl[0].gen = q->fl[1].gen = 1;
3077 q->fl[0].size = p->fl_size;
3078 q->fl[1].size = p->jumbo_size;
3081 q->rspq.size = p->rspq_size;
3082 spin_lock_init(&q->rspq.lock);
3083 skb_queue_head_init(&q->rspq.rx_queue);
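	/*
	 * Stop the Ethernet Tx queue once fewer descriptors remain than,
	 * roughly, one maximally fragmented packet (MAX_SKB_FRAGS + 1 buffers
	 * plus WR overhead) per port sharing the queue set.
	 */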
3085 q->txq[TXQ_ETH].stop_thres = nports *
3086 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
3088 #if FL0_PG_CHUNK_SIZE > 0
3089 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
3091 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
3093 #if FL1_PG_CHUNK_SIZE > 0
3094 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
3096 q->fl[1].buf_size = is_offload(adapter) ?
3097 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
3098 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
3101 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
3102 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
3103 q->fl[0].order = FL0_PG_ORDER;
3104 q->fl[1].order = FL1_PG_ORDER;
3105 q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
3106 q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
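	/*
	 * FL0 supplies small buffers (page chunks or sk_buffs), FL1 supplies
	 * large page chunks for jumbo frames and LRO; buf_size is the
	 * per-buffer size advertised to the HW, alloc_size the backing page
	 * allocation.
	 */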
3108 spin_lock_irq(&adapter->sge.reg_lock);
3110 /* FL threshold comparison uses < */
3111 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
3112 q->rspq.phys_addr, q->rspq.size,
3113 q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
3117 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
3118 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
3119 q->fl[i].phys_addr, q->fl[i].size,
3120 q->fl[i].buf_size - SGE_PG_RSVD,
3121 p->cong_thres, 1, 0);
3126 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
3127 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
				 1, 0);
3134 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
3135 USE_GTS, SGE_CNTXT_OFLD, id,
3136 q->txq[TXQ_OFLD].phys_addr,
3137 q->txq[TXQ_OFLD].size, 0, 1, 0);
3143 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
				 SGE_CNTXT_CTRL, id,
				 q->txq[TXQ_CTRL].phys_addr,
3146 q->txq[TXQ_CTRL].size,
3147 q->txq[TXQ_CTRL].token, 1, 0);
3152 spin_unlock_irq(&adapter->sge.reg_lock);
3157 t3_update_qset_coalesce(q, p);
3159 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
3160 GFP_KERNEL | __GFP_COMP);
	if (!avail) {
		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
		ret = -ENOMEM;
		goto err;
	}
	if (avail < q->fl[0].size)
		CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
			avail);
3170 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
3171 GFP_KERNEL | __GFP_COMP);
3172 if (avail < q->fl[1].size)
		CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
			avail);
3175 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
3177 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
		     V_NEWTIMER(q->rspq.holdoff_tmr));

	return 0;

err_unlock:
	spin_unlock_irq(&adapter->sge.reg_lock);
err:
	t3_free_qset(adapter, q);
	return ret;
}
 * t3_start_sge_timers - start SGE timer callbacks
3191 * @adap: the adapter
 * Starts each SGE queue set's timer callbacks.
3195 void t3_start_sge_timers(struct adapter *adap)
3199 for (i = 0; i < SGE_QSETS; ++i) {
3200 struct sge_qset *q = &adap->sge.qs[i];
3202 if (q->tx_reclaim_timer.function)
3203 mod_timer(&q->tx_reclaim_timer,
3204 jiffies + TX_RECLAIM_PERIOD);
3206 if (q->rx_reclaim_timer.function)
3207 mod_timer(&q->rx_reclaim_timer,
3208 jiffies + RX_RECLAIM_PERIOD);
 * t3_stop_sge_timers - stop SGE timer callbacks
3214 * @adap: the adapter
 * Stops each SGE queue set's timer callbacks.
3218 void t3_stop_sge_timers(struct adapter *adap)
3222 for (i = 0; i < SGE_QSETS; ++i) {
3223 struct sge_qset *q = &adap->sge.qs[i];
3225 if (q->tx_reclaim_timer.function)
3226 del_timer_sync(&q->tx_reclaim_timer);
3227 if (q->rx_reclaim_timer.function)
3228 del_timer_sync(&q->rx_reclaim_timer);
3233 * t3_free_sge_resources - free SGE resources
3234 * @adap: the adapter
3236 * Frees resources used by the SGE queue sets.
3238 void t3_free_sge_resources(struct adapter *adap)
3242 for (i = 0; i < SGE_QSETS; ++i)
3243 t3_free_qset(adap, &adap->sge.qs[i]);
3247 * t3_sge_start - enable SGE
3248 * @adap: the adapter
3250 * Enables the SGE for DMAs. This is the last step in starting packet
3253 void t3_sge_start(struct adapter *adap)
3255 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
3259 * t3_sge_stop_dma - Disable SGE DMA engine operation
3260 * @adap: the adapter
3262 * Can be invoked from interrupt context e.g. error handler.
 * Note that this function cannot disable the restart works, as it cannot
 * wait if called from interrupt context; however, the works will have no
 * effect since the doorbells are disabled. The driver will call
 * t3_sge_stop() later from process context, at which time the works will
 * be stopped if they are still running.
3270 void t3_sge_stop_dma(struct adapter *adap)
3272 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
 * t3_sge_stop - disable SGE operation completely
3277 * @adap: the adapter
3279 * Called from process context. Disables the DMA engine and any
3280 * pending queue restart works.
3282 void t3_sge_stop(struct adapter *adap)
3286 t3_sge_stop_dma(adap);
3288 /* workqueues aren't initialized otherwise */
3289 if (!(adap->flags & FULL_INIT_DONE))
3291 for (i = 0; i < SGE_QSETS; ++i) {
3292 struct sge_qset *qs = &adap->sge.qs[i];
3294 cancel_work_sync(&qs->txq[TXQ_OFLD].qresume_task);
3295 cancel_work_sync(&qs->txq[TXQ_CTRL].qresume_task);
3300 * t3_sge_init - initialize SGE
3301 * @adap: the adapter
3302 * @p: the SGE parameters
3304 * Performs SGE initialization needed every time after a chip reset.
3305 * We do not initialize any of the queue sets here, instead the driver
3306 * top-level must request those individually. We also do not enable DMA
3307 * here, that should be done after the queues have been set up.
3309 void t3_sge_init(struct adapter *adap, struct sge_params *p)
3311 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
3313 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
3314 F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
3315 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
3316 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
3317 #if SGE_NUM_GENBITS == 1
3318 ctrl |= F_EGRGENCTRL;
3320 if (adap->params.rev > 0) {
3321 if (!(adap->flags & (USING_MSIX | USING_MSI)))
3322 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
3324 t3_write_reg(adap, A_SG_CONTROL, ctrl);
3325 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
3326 V_LORCQDRBTHRSH(512));
3327 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
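	/*
	 * One SG timer tick is programmed to 0.1us (core_ticks_per_usec / 10
	 * core clocks); this is the unit in which interrupt holdoff values
	 * such as NOMEM_INTR_DELAY are expressed.
	 */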
3328 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
3329 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
3330 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
3331 adap->params.rev < T3_REV_C ? 1000 : 500);
3332 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
3333 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
3334 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
3335 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
3336 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
3340 * t3_sge_prep - one-time SGE initialization
3341 * @adap: the associated adapter
3342 * @p: SGE parameters
3344 * Performs one-time initialization of SGE SW state. Includes determining
3345 * defaults for the assorted SGE parameters, which admins can change until
3346 * they are used to initialize the SGE.
3348 void t3_sge_prep(struct adapter *adap, struct sge_params *p)
3352 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
3353 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3355 for (i = 0; i < SGE_QSETS; ++i) {
3356 struct qset_params *q = p->qset + i;
3358 q->polling = adap->params.rev > 0;
3359 q->coalesce_usecs = 5;
3360 q->rspq_size = 1024;
3362 q->jumbo_size = 512;
3363 q->txq_size[TXQ_ETH] = 1024;
3364 q->txq_size[TXQ_OFLD] = 1024;
3365 q->txq_size[TXQ_CTRL] = 256;
3369 spin_lock_init(&adap->sge.reg_lock);