2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/skbuff.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/if_vlan.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/jiffies.h>
42 #include <linux/prefetch.h>
43 #include <linux/export.h>
46 #include <net/busy_poll.h>
47 #ifdef CONFIG_CHELSIO_T4_FCOE
48 #include <scsi/fc/fc_fcoe.h>
49 #endif /* CONFIG_CHELSIO_T4_FCOE */
52 #include "t4_values.h"
55 #include "cxgb4_ptp.h"
58 * Rx buffer size. We use largish buffers if possible but settle for single
59 * pages under memory shortage.
62 # define FL_PG_ORDER 0
64 # define FL_PG_ORDER (16 - PAGE_SHIFT)
67 /* RX_PULL_LEN should be <= RX_COPY_THRES */
68 #define RX_COPY_THRES 256
69 #define RX_PULL_LEN 128
72 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
73 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
75 #define RX_PKT_SKB_LEN 512
78 * Max number of Tx descriptors we clean up at a time. Should be modest as
79 * freeing skbs isn't cheap and it happens while holding locks. We just need
80 * to free packets faster than they arrive; we eventually catch up and keep
81 * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES.
83 #define MAX_TX_RECLAIM 16
86 * Max number of Rx buffers we replenish at a time. Again keep this modest,
87 * allocating buffers isn't cheap either.
89 #define MAX_RX_REFILL 16U
92 * Period of the Rx queue check timer. This timer is infrequent as it has
93 * something to do only when the system experiences severe memory shortage.
95 #define RX_QCHECK_PERIOD (HZ / 2)
98 * Period of the Tx queue check timer.
100 #define TX_QCHECK_PERIOD (HZ / 2)
103 * Max number of Tx descriptors to be reclaimed by the Tx timer.
105 #define MAX_TIMER_TX_RECLAIM 100
108 * Timer index used when backing off due to memory shortage.
110 #define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
113 * Suspend an Ethernet Tx queue with fewer available descriptors than this.
114 * This is the same as calc_tx_descs() for a TSO packet with
115 * nr_frags == MAX_SKB_FRAGS.
117 #define ETHTXQ_STOP_THRES \
118 (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
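/* For example, with MAX_SKB_FRAGS == 17 (the common value with 4KB pages)
 * this evaluates to 1 + DIV_ROUND_UP(25 + 1, 8) = 1 + 4 = 5 descriptors.
 */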
121 * Suspension threshold for non-Ethernet Tx queues. We require enough room
122 * for a full sized WR.
124 #define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
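/* With the usual 512-byte SGE_MAX_WR_LEN and 64-byte Tx descriptors this
 * works out to a threshold of 8 descriptors.
 */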
127 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
130 #define MAX_IMM_TX_PKT_LEN 256
133 * Max size of a WR sent through a control Tx queue.
135 #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
137 struct tx_sw_desc { /* SW state per Tx descriptor */
139 struct ulptx_sgl *sgl;
142 struct rx_sw_desc { /* SW state per Rx descriptor */
148 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
149 * buffer). We currently only support two sizes for 1500- and 9000-byte MTUs.
150 * We could easily support more but there doesn't seem to be much need for
153 #define FL_MTU_SMALL 1500
154 #define FL_MTU_LARGE 9000
156 static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
159 struct sge *s = &adapter->sge;
161 return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
164 #define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
165 #define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
168 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses
169 * these to specify the buffer size as an index into the SGE Free List Buffer
170 * Size register array. We also use bit 4 to mark when a buffer has been unmapped
171 * for DMA; this is of course never sent to the hardware and is only used
172 * to prevent double unmappings. All of the above requires that the Free List
173 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
174 * 32-byte or a power of 2 greater in alignment. Since the SGE's minimal
175 * Free List Buffer alignment is 32 bytes, this works out for us ...
178 RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */
179 RX_BUF_SIZE = 0x0f, /* bottom four bits are for buf sizes */
180 RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */
183 * XXX We shouldn't depend on being able to use these indices.
184 * XXX Especially when some other Master PF has initialized the
185 * XXX adapter or we use the Firmware Configuration File. We
186 * XXX should really search through the Host Buffer Size register
187 * XXX array for the appropriately sized buffer indices.
189 RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */
190 RX_LARGE_PG_BUF = 0x1, /* large (FL_PG_ORDER) page buffer */
192 RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */
193 RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */
196 static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
197 #define MIN_NAPI_WORK 1
199 static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
201 return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
204 static inline bool is_buf_mapped(const struct rx_sw_desc *d)
206 return !(d->dma_addr & RX_UNMAPPED_BUF);
210 * txq_avail - return the number of available slots in a Tx queue
213 * Returns the number of descriptors in a Tx queue available to write new
216 static inline unsigned int txq_avail(const struct sge_txq *q)
218 return q->size - 1 - q->in_use;
222 * fl_cap - return the capacity of a free-buffer list
225 * Returns the capacity of a free-buffer list. The capacity is less than
226 * the size because one descriptor needs to be left unpopulated, otherwise
227 * HW will think the FL is empty.
229 static inline unsigned int fl_cap(const struct sge_fl *fl)
231 return fl->size - 8; /* 1 descriptor = 8 buffers */
235 * fl_starving - return whether a Free List is starving.
236 * @adapter: pointer to the adapter
239 * Tests specified Free List to see whether the number of buffers
240 * available to the hardware has fallen below our "starvation"
243 static inline bool fl_starving(const struct adapter *adapter,
244 const struct sge_fl *fl)
246 const struct sge *s = &adapter->sge;
248 return fl->avail - fl->pend_cred <= s->fl_starve_thres;
251 static int map_skb(struct device *dev, const struct sk_buff *skb,
254 const skb_frag_t *fp, *end;
255 const struct skb_shared_info *si;
257 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
258 if (dma_mapping_error(dev, *addr))
261 si = skb_shinfo(skb);
262 end = &si->frags[si->nr_frags];
264 for (fp = si->frags; fp < end; fp++) {
265 *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
267 if (dma_mapping_error(dev, *addr))
273 while (fp-- > si->frags)
274 dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
276 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
281 #ifdef CONFIG_NEED_DMA_MAP_STATE
282 static void unmap_skb(struct device *dev, const struct sk_buff *skb,
283 const dma_addr_t *addr)
285 const skb_frag_t *fp, *end;
286 const struct skb_shared_info *si;
288 dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
290 si = skb_shinfo(skb);
291 end = &si->frags[si->nr_frags];
292 for (fp = si->frags; fp < end; fp++)
293 dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
297 * deferred_unmap_destructor - unmap a packet when it is freed
300 * This is the packet destructor used for Tx packets that need to remain
301 * mapped until they are freed rather than until their Tx descriptors are
304 static void deferred_unmap_destructor(struct sk_buff *skb)
306 unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
310 static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
311 const struct ulptx_sgl *sgl, const struct sge_txq *q)
313 const struct ulptx_sge_pair *p;
314 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
316 if (likely(skb_headlen(skb)))
317 dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
320 dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
326 * the complexity below is because of the possibility of a wrap-around
327 * in the middle of an SGL
329 for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
330 if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
331 unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
332 ntohl(p->len[0]), DMA_TO_DEVICE);
333 dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
334 ntohl(p->len[1]), DMA_TO_DEVICE);
336 } else if ((u8 *)p == (u8 *)q->stat) {
337 p = (const struct ulptx_sge_pair *)q->desc;
339 } else if ((u8 *)p + 8 == (u8 *)q->stat) {
340 const __be64 *addr = (const __be64 *)q->desc;
342 dma_unmap_page(dev, be64_to_cpu(addr[0]),
343 ntohl(p->len[0]), DMA_TO_DEVICE);
344 dma_unmap_page(dev, be64_to_cpu(addr[1]),
345 ntohl(p->len[1]), DMA_TO_DEVICE);
346 p = (const struct ulptx_sge_pair *)&addr[2];
348 const __be64 *addr = (const __be64 *)q->desc;
350 dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
351 ntohl(p->len[0]), DMA_TO_DEVICE);
352 dma_unmap_page(dev, be64_to_cpu(addr[0]),
353 ntohl(p->len[1]), DMA_TO_DEVICE);
354 p = (const struct ulptx_sge_pair *)&addr[1];
360 if ((u8 *)p == (u8 *)q->stat)
361 p = (const struct ulptx_sge_pair *)q->desc;
362 addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
363 *(const __be64 *)q->desc;
364 dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
370 * free_tx_desc - reclaims Tx descriptors and their buffers
371 * @adapter: the adapter
372 * @q: the Tx queue to reclaim descriptors from
373 * @n: the number of descriptors to reclaim
374 * @unmap: whether the buffers should be unmapped for DMA
376 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
377 * Tx buffers. Called with the Tx queue lock held.
379 void free_tx_desc(struct adapter *adap, struct sge_txq *q,
380 unsigned int n, bool unmap)
382 struct tx_sw_desc *d;
383 unsigned int cidx = q->cidx;
384 struct device *dev = adap->pdev_dev;
388 if (d->skb) { /* an SGL is present */
390 unmap_sgl(dev, d->skb, d->sgl, q);
391 dev_consume_skb_any(d->skb);
395 if (++cidx == q->size) {
404 * Return the number of reclaimable descriptors in a Tx queue.
406 static inline int reclaimable(const struct sge_txq *q)
408 int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
410 return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
414 * reclaim_completed_tx - reclaims completed Tx descriptors
416 * @q: the Tx queue to reclaim completed descriptors from
417 * @unmap: whether the buffers should be unmapped for DMA
419 * Reclaims Tx descriptors that the SGE has indicated it has processed,
420 * and frees the associated buffers if possible. Called with the Tx
423 static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
426 int avail = reclaimable(q);
430 * Limit the amount of clean up work we do at a time to keep
431 * the Tx lock hold time O(1).
433 if (avail > MAX_TX_RECLAIM)
434 avail = MAX_TX_RECLAIM;
436 free_tx_desc(adap, q, avail, unmap);
441 static inline int get_buf_size(struct adapter *adapter,
442 const struct rx_sw_desc *d)
444 struct sge *s = &adapter->sge;
445 unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
448 switch (rx_buf_size_idx) {
449 case RX_SMALL_PG_BUF:
450 buf_size = PAGE_SIZE;
453 case RX_LARGE_PG_BUF:
454 buf_size = PAGE_SIZE << s->fl_pg_order;
457 case RX_SMALL_MTU_BUF:
458 buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
461 case RX_LARGE_MTU_BUF:
462 buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
473 * free_rx_bufs - free the Rx buffers on an SGE free list
475 * @q: the SGE free list to free buffers from
476 * @n: how many buffers to free
478 * Release the next @n buffers on an SGE free-buffer Rx queue. The
479 * buffers must be made inaccessible to HW before calling this function.
481 static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
484 struct rx_sw_desc *d = &q->sdesc[q->cidx];
486 if (is_buf_mapped(d))
487 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
488 get_buf_size(adap, d),
492 if (++q->cidx == q->size)
499 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
501 * @q: the SGE free list
503 * Unmap the current buffer on an SGE free-buffer Rx queue. The
504 * buffer must be made inaccessible to HW before calling this function.
506 * This is similar to @free_rx_bufs above but does not free the buffer.
507 * Do note that the FL still loses any further access to the buffer.
509 static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
511 struct rx_sw_desc *d = &q->sdesc[q->cidx];
513 if (is_buf_mapped(d))
514 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
515 get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
517 if (++q->cidx == q->size)
522 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
524 if (q->pend_cred >= 8) {
525 u32 val = adap->params.arch.sge_fl_db;
527 if (is_t4(adap->params.chip))
528 val |= PIDX_V(q->pend_cred / 8);
530 val |= PIDX_T5_V(q->pend_cred / 8);
532 /* Make sure all memory writes to the Free List queue are
533 * committed before we tell the hardware about them.
537 /* If we don't have access to the new User Doorbell (T5+), use
538 * the old doorbell mechanism; otherwise use the new BAR2
541 if (unlikely(q->bar2_addr == NULL)) {
542 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
543 val | QID_V(q->cntxt_id));
545 writel(val | QID_V(q->bar2_qid),
546 q->bar2_addr + SGE_UDB_KDOORBELL);
548 /* This Write memory Barrier will force the write to
549 * the User Doorbell area to be flushed.
557 static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
561 sd->dma_addr = mapping; /* includes size low bits */
565 * refill_fl - refill an SGE Rx buffer ring
567 * @q: the ring to refill
568 * @n: the number of new buffers to allocate
569 * @gfp: the gfp flags for the allocations
571 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
572 * allocated with the supplied gfp flags. The caller must assure that
573 * @n does not exceed the queue's capacity. If afterwards the queue is
574 * found critically low mark it as starving in the bitmap of starving FLs.
576 * Returns the number of buffers allocated.
578 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
581 struct sge *s = &adap->sge;
584 unsigned int cred = q->avail;
585 __be64 *d = &q->desc[q->pidx];
586 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
589 #ifdef CONFIG_DEBUG_FS
590 if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
595 node = dev_to_node(adap->pdev_dev);
597 if (s->fl_pg_order == 0)
598 goto alloc_small_pages;
601 * Prefer large buffers
604 pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
606 q->large_alloc_failed++;
607 break; /* fall back to single pages */
610 mapping = dma_map_page(adap->pdev_dev, pg, 0,
611 PAGE_SIZE << s->fl_pg_order,
613 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
614 __free_pages(pg, s->fl_pg_order);
616 goto out; /* do not try small pages for this error */
618 mapping |= RX_LARGE_PG_BUF;
619 *d++ = cpu_to_be64(mapping);
621 set_rx_sw_desc(sd, pg, mapping);
625 if (++q->pidx == q->size) {
635 pg = alloc_pages_node(node, gfp, 0);
641 mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
643 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
648 *d++ = cpu_to_be64(mapping);
650 set_rx_sw_desc(sd, pg, mapping);
654 if (++q->pidx == q->size) {
661 out: cred = q->avail - cred;
662 q->pend_cred += cred;
665 if (unlikely(fl_starving(adap, q))) {
668 set_bit(q->cntxt_id - adap->sge.egr_start,
669 adap->sge.starving_fl);
675 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
677 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
682 * alloc_ring - allocate resources for an SGE descriptor ring
683 * @dev: the PCI device's core device
684 * @nelem: the number of descriptors
685 * @elem_size: the size of each descriptor
686 * @sw_size: the size of the SW state associated with each ring element
687 * @phys: the physical address of the allocated ring
688 * @metadata: address of the array holding the SW state for the ring
689 * @stat_size: extra space in HW ring for status information
690 * @node: preferred node for memory allocations
692 * Allocates resources for an SGE descriptor ring, such as Tx queues,
693 * free buffer lists, or response queues. Each SGE ring requires
694 * space for its HW descriptors plus, optionally, space for the SW state
695 * associated with each HW entry (the metadata). The function returns
696 * three values: the virtual address for the HW ring (the return value
697 * of the function), the bus address of the HW ring, and the address
700 static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
701 size_t sw_size, dma_addr_t *phys, void *metadata,
702 size_t stat_size, int node)
704 size_t len = nelem * elem_size + stat_size;
706 void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
711 s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
714 dma_free_coherent(dev, len, p, *phys);
719 *(void **)metadata = s;
725 * sgl_len - calculates the size of an SGL of the given capacity
726 * @n: the number of SGL entries
728 * Calculates the number of flits needed for a scatter/gather list that
729 * can hold the given number of entries.
731 static inline unsigned int sgl_len(unsigned int n)
733 /* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
734 * addresses. The DSGL Work Request starts off with a 32-bit DSGL
735 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
736 * repeated sequences of { Length[i], Length[i+1], Address[i],
737 * Address[i+1] } (this ensures that all addresses are on 64-bit
738 * boundaries). If N is even, then Length[N+1] should be set to 0 and
739 * Address[N+1] is omitted.
741 * The following calculation incorporates all of the above. It's
742 * somewhat hard to follow but, briefly: the "+2" accounts for the
743 * first two flits which include the DSGL header, Length0 and
744 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
745 * flits for every pair of the remaining N) +1 if (n-1) is odd; and
746 * finally the "+((n-1)&1)" adds the one remaining flit needed if
749 n--;
750 return (3 * n) / 2 + (n & 1) + 2;
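/* Worked example: an SGL with 3 entries needs 5 flits (40 bytes): two
 * flits for the header, Length0 and Address0, plus three flits for the
 * remaining pair of entries.
 */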
754 * flits_to_desc - returns the num of Tx descriptors for the given flits
755 * @n: the number of flits
757 * Returns the number of Tx descriptors needed for the supplied number
760 static inline unsigned int flits_to_desc(unsigned int n)
762 BUG_ON(n > SGE_MAX_WR_LEN / 8);
763 return DIV_ROUND_UP(n, 8);
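/* Each Tx descriptor holds 8 flits (64 bytes), hence the division by 8. */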
767 * is_eth_imm - can an Ethernet packet be sent as immediate data?
770 * Returns whether an Ethernet packet is small enough to fit as
771 * immediate data. Return value corresponds to headroom required.
773 static inline int is_eth_imm(const struct sk_buff *skb)
775 int hdrlen = skb_shinfo(skb)->gso_size ?
776 sizeof(struct cpl_tx_pkt_lso_core) : 0;
778 hdrlen += sizeof(struct cpl_tx_pkt);
779 if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
785 * calc_tx_flits - calculate the number of flits for a packet Tx WR
788 * Returns the number of flits needed for a Tx WR for the given Ethernet
789 * packet, including the needed WR and CPL headers.
791 static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
794 int hdrlen = is_eth_imm(skb);
796 /* If the skb is small enough, we can pump it out as a work request
797 * with only immediate data. In that case we just have to have the
798 * TX Packet header plus the skb data in the Work Request.
802 return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
804 /* Otherwise, we're going to have to construct a Scatter gather list
805 * of the skb body and fragments. We also include the flits necessary
806 * for the TX Packet Work Request and CPL. We always have a firmware
807 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
808 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
809 * message or, if we're doing a Large Send Offload, an LSO CPL message
810 * with an embedded TX Packet Write CPL message.
812 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
813 if (skb_shinfo(skb)->gso_size)
814 flits += (sizeof(struct fw_eth_tx_pkt_wr) +
815 sizeof(struct cpl_tx_pkt_lso_core) +
816 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
818 flits += (sizeof(struct fw_eth_tx_pkt_wr) +
819 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
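/* Rough worked example: a non-TSO skb with a linear header area plus three
 * page fragments needs sgl_len(4) = 7 flits for the DSGL and 4 flits for
 * the WR + CPL headers, i.e. 11 flits and therefore 2 Tx descriptors.
 */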
824 * calc_tx_descs - calculate the number of Tx descriptors for a packet
827 * Returns the number of Tx descriptors needed for the given Ethernet
828 * packet, including the needed WR and CPL headers.
830 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
832 return flits_to_desc(calc_tx_flits(skb));
836 * write_sgl - populate a scatter/gather list for a packet
838 * @q: the Tx queue we are writing into
839 * @sgl: starting location for writing the SGL
840 * @end: points right after the end of the SGL
841 * @start: start offset into skb main-body data to include in the SGL
842 * @addr: the list of bus addresses for the SGL elements
844 * Generates a gather list for the buffers that make up a packet.
845 * The caller must provide adequate space for the SGL that will be written.
846 * The SGL includes all of the packet's page fragments and the data in its
847 * main body except for the first @start bytes. @sgl must be 16-byte
848 * aligned and within a Tx descriptor with available space. @end points
849 * right after the end of the SGL but does not account for any potential
850 * wrap around, i.e., @end > @sgl.
852 static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
853 struct ulptx_sgl *sgl, u64 *end, unsigned int start,
854 const dma_addr_t *addr)
857 struct ulptx_sge_pair *to;
858 const struct skb_shared_info *si = skb_shinfo(skb);
859 unsigned int nfrags = si->nr_frags;
860 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
862 len = skb_headlen(skb) - start;
864 sgl->len0 = htonl(len);
865 sgl->addr0 = cpu_to_be64(addr[0] + start);
868 sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
869 sgl->addr0 = cpu_to_be64(addr[1]);
872 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
873 ULPTX_NSGE_V(nfrags));
874 if (likely(--nfrags == 0))
877 * Most of the complexity below deals with the possibility we hit the
878 * end of the queue in the middle of writing the SGL. For this case
879 * only we create the SGL in a temporary buffer and then copy it.
881 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
883 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
884 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
885 to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
886 to->addr[0] = cpu_to_be64(addr[i]);
887 to->addr[1] = cpu_to_be64(addr[++i]);
890 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
891 to->len[1] = cpu_to_be32(0);
892 to->addr[0] = cpu_to_be64(addr[i + 1]);
894 if (unlikely((u8 *)end > (u8 *)q->stat)) {
895 unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
898 memcpy(sgl->sge, buf, part0);
899 part1 = (u8 *)end - (u8 *)q->stat;
900 memcpy(q->desc, (u8 *)buf + part0, part1);
901 end = (void *)q->desc + part1;
903 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
907 /* This function copies a 64-byte coalesced work request to
908 * memory-mapped BAR2 space. For a coalesced WR the SGE fetches
909 * data from the FIFO instead of from the host.
911 static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
924 * ring_tx_db - check and potentially ring a Tx queue's doorbell
927 * @n: number of new descriptors to give to HW
929 * Ring the doorbell for a Tx queue.
931 static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
933 /* Make sure that all writes to the TX Descriptors are committed
934 * before we tell the hardware about them.
938 /* If we don't have access to the new User Doorbell (T5+), use the old
939 * doorbell mechanism; otherwise use the new BAR2 mechanism.
941 if (unlikely(q->bar2_addr == NULL)) {
945 /* For T4 we need to participate in the Doorbell Recovery
948 spin_lock_irqsave(&q->db_lock, flags);
950 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
951 QID_V(q->cntxt_id) | val);
954 q->db_pidx = q->pidx;
955 spin_unlock_irqrestore(&q->db_lock, flags);
957 u32 val = PIDX_T5_V(n);
959 /* T4 and later chips share the same PIDX field offset within
960 * the doorbell, but T5 and later shrank the field in order to
961 * gain a bit for Doorbell Priority. The field was absurdly
962 * large in the first place (14 bits) so we just use the T5
963 * and later limits and warn if a Queue ID is too large.
965 WARN_ON(val & DBPRIO_F);
967 /* If we're only writing a single TX Descriptor and we can use
968 * Inferred QID registers, we can use the Write Combining
969 * Gather Buffer; otherwise we use the simple doorbell.
971 if (n == 1 && q->bar2_qid == 0) {
975 u64 *wr = (u64 *)&q->desc[index];
977 cxgb_pio_copy((u64 __iomem *)
978 (q->bar2_addr + SGE_UDB_WCDOORBELL),
981 writel(val | QID_V(q->bar2_qid),
982 q->bar2_addr + SGE_UDB_KDOORBELL);
985 /* This Write Memory Barrier will force the write to the User
986 * Doorbell area to be flushed. This is needed to prevent
987 * writes on different CPUs for the same queue from hitting
988 * the adapter out of order. This is required when some Work
989 * Requests take the Write Combine Gather Buffer path (user
990 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
991 * take the traditional path where we simply increment the
992 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
993 * hardware DMA read the actual Work Request.
1000 * inline_tx_skb - inline a packet's data into Tx descriptors
1002 * @q: the Tx queue where the packet will be inlined
1003 * @pos: starting position in the Tx queue where to inline the packet
1005 * Inline a packet's contents directly into Tx descriptors, starting at
1006 * the given position within the Tx DMA ring.
1007 * Most of the complexity of this operation is dealing with wrap arounds
1008 * in the middle of the packet we want to inline.
1010 static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
1014 int left = (void *)q->stat - pos;
1016 if (likely(skb->len <= left)) {
1017 if (likely(!skb->data_len))
1018 skb_copy_from_linear_data(skb, pos, skb->len);
1020 skb_copy_bits(skb, 0, pos, skb->len);
1023 skb_copy_bits(skb, 0, pos, left);
1024 skb_copy_bits(skb, left, q->desc, skb->len - left);
1025 pos = (void *)q->desc + (skb->len - left);
1028 /* 0-pad to multiple of 16 */
1029 p = PTR_ALIGN(pos, 8);
1030 if ((uintptr_t)p & 8)
1034 static void *inline_tx_skb_header(const struct sk_buff *skb,
1035 const struct sge_txq *q, void *pos,
1039 int left = (void *)q->stat - pos;
1041 if (likely(length <= left)) {
1042 memcpy(pos, skb->data, length);
1045 memcpy(pos, skb->data, left);
1046 memcpy(q->desc, skb->data + left, length - left);
1047 pos = (void *)q->desc + (length - left);
1049 /* 0-pad to multiple of 16 */
1050 p = PTR_ALIGN(pos, 8);
1051 if ((uintptr_t)p & 8) {
1059 * Figure out what HW csum a packet wants and return the appropriate control
1062 static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
1065 const struct iphdr *iph = ip_hdr(skb);
1067 if (iph->version == 4) {
1068 if (iph->protocol == IPPROTO_TCP)
1069 csum_type = TX_CSUM_TCPIP;
1070 else if (iph->protocol == IPPROTO_UDP)
1071 csum_type = TX_CSUM_UDPIP;
1074 * unknown protocol, disable HW csum
1075 * and hope a bad packet is detected
1077 return TXPKT_L4CSUM_DIS_F;
1081 * this doesn't work with extension headers
1083 const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
1085 if (ip6h->nexthdr == IPPROTO_TCP)
1086 csum_type = TX_CSUM_TCPIP6;
1087 else if (ip6h->nexthdr == IPPROTO_UDP)
1088 csum_type = TX_CSUM_UDPIP6;
1093 if (likely(csum_type >= TX_CSUM_TCPIP)) {
1094 u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
1095 int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
1097 if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
1098 hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1100 hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1101 return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
1103 int start = skb_transport_offset(skb);
1105 return TXPKT_CSUM_TYPE_V(csum_type) |
1106 TXPKT_CSUM_START_V(start) |
1107 TXPKT_CSUM_LOC_V(start + skb->csum_offset);
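/* The control bits computed above are OR-ed into cpl_tx_pkt_core.ctrl1 by
 * t4_eth_xmit() when an skb arrives with CHECKSUM_PARTIAL.
 */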
1111 static void eth_txq_stop(struct sge_eth_txq *q)
1113 netif_tx_stop_queue(q->txq);
1117 static inline void txq_advance(struct sge_txq *q, unsigned int n)
1121 if (q->pidx >= q->size)
1125 #ifdef CONFIG_CHELSIO_T4_FCOE
1127 cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
1128 const struct port_info *pi, u64 *cntrl)
1130 const struct cxgb_fcoe *fcoe = &pi->fcoe;
1132 if (!(fcoe->flags & CXGB_FCOE_ENABLED))
1135 if (skb->protocol != htons(ETH_P_FCOE))
1138 skb_reset_mac_header(skb);
1139 skb->mac_len = sizeof(struct ethhdr);
1141 skb_set_network_header(skb, skb->mac_len);
1142 skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
1144 if (!cxgb_fcoe_sof_eof_supported(adap, skb))
1147 /* FC CRC offload */
1148 *cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
1149 TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
1150 TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
1151 TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
1152 TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
1155 #endif /* CONFIG_CHELSIO_T4_FCOE */
1158 * t4_eth_xmit - add a packet to an Ethernet Tx queue
1160 * @dev: the egress net device
1162 * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
1164 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1166 u32 wr_mid, ctrl0, op;
1169 unsigned int flits, ndesc;
1170 struct adapter *adap;
1171 struct sge_eth_txq *q;
1172 const struct port_info *pi;
1173 struct fw_eth_tx_pkt_wr *wr;
1174 struct cpl_tx_pkt_core *cpl;
1175 const struct skb_shared_info *ssi;
1176 dma_addr_t addr[MAX_SKB_FRAGS + 1];
1177 bool immediate = false;
1178 int len, max_pkt_len;
1179 bool ptp_enabled = is_ptp_enabled(skb, dev);
1180 #ifdef CONFIG_CHELSIO_T4_FCOE
1182 #endif /* CONFIG_CHELSIO_T4_FCOE */
1185 * The chip min packet length is 10 octets but play safe and reject
1186 * anything shorter than an Ethernet header.
1188 if (unlikely(skb->len < ETH_HLEN)) {
1189 out_free: dev_kfree_skb_any(skb);
1190 return NETDEV_TX_OK;
1193 /* Discard the packet if the length is greater than mtu */
1194 max_pkt_len = ETH_HLEN + dev->mtu;
1195 if (skb_vlan_tagged(skb))
1196 max_pkt_len += VLAN_HLEN;
1197 if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
1200 pi = netdev_priv(dev);
1202 qidx = skb_get_queue_mapping(skb);
1204 spin_lock(&adap->ptp_lock);
1205 if (!(adap->ptp_tx_skb)) {
1206 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1207 adap->ptp_tx_skb = skb_get(skb);
1209 spin_unlock(&adap->ptp_lock);
1212 q = &adap->sge.ptptxq;
1214 q = &adap->sge.ethtxq[qidx + pi->first_qset];
1216 skb_tx_timestamp(skb);
1218 reclaim_completed_tx(adap, &q->q, true);
1219 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
1221 #ifdef CONFIG_CHELSIO_T4_FCOE
1222 err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
1223 if (unlikely(err == -ENOTSUPP)) {
1225 spin_unlock(&adap->ptp_lock);
1228 #endif /* CONFIG_CHELSIO_T4_FCOE */
1230 flits = calc_tx_flits(skb);
1231 ndesc = flits_to_desc(flits);
1232 credits = txq_avail(&q->q) - ndesc;
1234 if (unlikely(credits < 0)) {
1236 dev_err(adap->pdev_dev,
1237 "%s: Tx ring %u full while queue awake!\n",
1240 spin_unlock(&adap->ptp_lock);
1241 return NETDEV_TX_BUSY;
1244 if (is_eth_imm(skb))
1248 unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
1251 spin_unlock(&adap->ptp_lock);
1255 wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1256 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1258 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1261 wr = (void *)&q->q.desc[q->q.pidx];
1262 wr->equiq_to_len16 = htonl(wr_mid);
1263 wr->r3 = cpu_to_be64(0);
1264 end = (u64 *)wr + flits;
1266 len = immediate ? skb->len : 0;
1267 ssi = skb_shinfo(skb);
1268 if (ssi->gso_size) {
1269 struct cpl_tx_pkt_lso *lso = (void *)wr;
1270 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1271 int l3hdr_len = skb_network_header_len(skb);
1272 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1274 len += sizeof(*lso);
1275 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1276 FW_WR_IMMDLEN_V(len));
1277 lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
1278 LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
1280 LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
1281 LSO_IPHDR_LEN_V(l3hdr_len / 4) |
1282 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1283 lso->c.ipid_ofst = htons(0);
1284 lso->c.mss = htons(ssi->gso_size);
1285 lso->c.seqno_offset = htonl(0);
1286 if (is_t4(adap->params.chip))
1287 lso->c.len = htonl(skb->len);
1289 lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
1290 cpl = (void *)(lso + 1);
1292 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
1293 cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1295 cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1297 cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
1298 TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1299 TXPKT_IPHDR_LEN_V(l3hdr_len);
1301 q->tx_cso += ssi->gso_segs;
1303 len += sizeof(*cpl);
1305 op = FW_PTP_TX_PKT_WR;
1307 op = FW_ETH_TX_PKT_WR;
1308 wr->op_immdlen = htonl(FW_WR_OP_V(op) |
1309 FW_WR_IMMDLEN_V(len));
1310 cpl = (void *)(wr + 1);
1311 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1312 cntrl = hwcsum(adap->params.chip, skb) |
1318 if (skb_vlan_tag_present(skb)) {
1320 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
1321 #ifdef CONFIG_CHELSIO_T4_FCOE
1322 if (skb->protocol == htons(ETH_P_FCOE))
1323 cntrl |= TXPKT_VLAN_V(
1324 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
1325 #endif /* CONFIG_CHELSIO_T4_FCOE */
1328 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
1329 TXPKT_PF_V(adap->pf);
1331 ctrl0 |= TXPKT_TSTAMP_F;
1332 #ifdef CONFIG_CHELSIO_T4_DCB
1333 if (is_t4(adap->params.chip))
1334 ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
1336 ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
1338 cpl->ctrl0 = htonl(ctrl0);
1339 cpl->pack = htons(0);
1340 cpl->len = htons(skb->len);
1341 cpl->ctrl1 = cpu_to_be64(cntrl);
1344 inline_tx_skb(skb, &q->q, cpl + 1);
1345 dev_consume_skb_any(skb);
1349 write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
1353 last_desc = q->q.pidx + ndesc - 1;
1354 if (last_desc >= q->q.size)
1355 last_desc -= q->q.size;
1356 q->q.sdesc[last_desc].skb = skb;
1357 q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
1360 txq_advance(&q->q, ndesc);
1362 ring_tx_db(adap, &q->q, ndesc);
1364 spin_unlock(&adap->ptp_lock);
1365 return NETDEV_TX_OK;
1369 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1370 * @q: the SGE control Tx queue
1372 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1373 * that send only immediate data (presently just the control queues) and
1374 * thus do not have any sk_buffs to release.
1376 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1378 int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
1379 int reclaim = hw_cidx - q->cidx;
1384 q->in_use -= reclaim;
1389 * is_imm - check whether a packet can be sent as immediate data
1392 * Returns true if a packet can be sent as a WR with immediate data.
1394 static inline int is_imm(const struct sk_buff *skb)
1396 return skb->len <= MAX_CTRL_WR_LEN;
1400 * ctrlq_check_stop - check if a control queue is full and should stop
1402 * @wr: most recent WR written to the queue
1404 * Check if a control queue has become full and should be stopped.
1405 * We clean up control queue descriptors very lazily, only when we are out.
1406 * If the queue is still full after reclaiming any completed descriptors
1407 * we suspend it and have the last WR wake it up.
1409 static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
1411 reclaim_completed_tx_imm(&q->q);
1412 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1413 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
1420 * ctrl_xmit - send a packet through an SGE control Tx queue
1421 * @q: the control queue
1424 * Send a packet through an SGE control Tx queue. Packets sent through
1425 * a control queue must fit entirely as immediate data.
1427 static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
1430 struct fw_wr_hdr *wr;
1432 if (unlikely(!is_imm(skb))) {
1435 return NET_XMIT_DROP;
1438 ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
1439 spin_lock(&q->sendq.lock);
1441 if (unlikely(q->full)) {
1442 skb->priority = ndesc; /* save for restart */
1443 __skb_queue_tail(&q->sendq, skb);
1444 spin_unlock(&q->sendq.lock);
1448 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1449 inline_tx_skb(skb, &q->q, wr);
1451 txq_advance(&q->q, ndesc);
1452 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
1453 ctrlq_check_stop(q, wr);
1455 ring_tx_db(q->adap, &q->q, ndesc);
1456 spin_unlock(&q->sendq.lock);
1459 return NET_XMIT_SUCCESS;
1463 * restart_ctrlq - restart a suspended control queue
1464 * @data: the control queue to restart
1466 * Resumes transmission on a suspended Tx control queue.
1468 static void restart_ctrlq(unsigned long data)
1470 struct sk_buff *skb;
1471 unsigned int written = 0;
1472 struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
1474 spin_lock(&q->sendq.lock);
1475 reclaim_completed_tx_imm(&q->q);
1476 BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */
1478 while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
1479 struct fw_wr_hdr *wr;
1480 unsigned int ndesc = skb->priority; /* previously saved */
1483 /* Write descriptors and free skbs outside the lock to limit
1484 * wait times. q->full is still set so new skbs will be queued.
1486 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1487 txq_advance(&q->q, ndesc);
1488 spin_unlock(&q->sendq.lock);
1490 inline_tx_skb(skb, &q->q, wr);
1493 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1494 unsigned long old = q->q.stops;
1496 ctrlq_check_stop(q, wr);
1497 if (q->q.stops != old) { /* suspended anew */
1498 spin_lock(&q->sendq.lock);
1503 ring_tx_db(q->adap, &q->q, written);
1506 spin_lock(&q->sendq.lock);
1509 ringdb: if (written)
1510 ring_tx_db(q->adap, &q->q, written);
1511 spin_unlock(&q->sendq.lock);
1515 * t4_mgmt_tx - send a management message
1516 * @adap: the adapter
1517 * @skb: the packet containing the management message
1519 * Send a management message through control queue 0.
1521 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1526 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
1532 * is_ofld_imm - check whether a packet can be sent as immediate data
1535 * Returns true if a packet can be sent as an offload WR with immediate
1536 * data. We currently use the same limit as for Ethernet packets.
1538 static inline int is_ofld_imm(const struct sk_buff *skb)
1540 return skb->len <= MAX_IMM_TX_PKT_LEN;
1544 * calc_tx_flits_ofld - calculate # of flits for an offload packet
1547 * Returns the number of flits needed for the given offload packet.
1548 * These packets are already fully constructed and no additional headers
1551 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
1553 unsigned int flits, cnt;
1555 if (is_ofld_imm(skb))
1556 return DIV_ROUND_UP(skb->len, 8);
1558 flits = skb_transport_offset(skb) / 8U; /* headers */
1559 cnt = skb_shinfo(skb)->nr_frags;
1560 if (skb_tail_pointer(skb) != skb_transport_header(skb))
1562 return flits + sgl_len(cnt);
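/* The WR and CPL headers of an offload packet occupy the linear area up to
 * the transport header and are written as immediate data; any remaining
 * linear data and the page fragments are described by the SGL counted via
 * sgl_len() above.
 */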
1566 * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
1567 * @adap: the adapter
1568 * @q: the queue to stop
1570 * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
1571 * inability to map packets. A periodic timer attempts to restart
1574 static void txq_stop_maperr(struct sge_uld_txq *q)
1578 set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
1579 q->adap->sge.txq_maperr);
1583 * ofldtxq_stop - stop an offload Tx queue that has become full
1584 * @q: the queue to stop
1585 * @skb: the packet causing the queue to become full
1587 * Stops an offload Tx queue that has become full and modifies the packet
1588 * being written to request a wakeup.
1590 static void ofldtxq_stop(struct sge_uld_txq *q, struct sk_buff *skb)
1592 struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
1594 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
1600 * service_ofldq - service/restart a suspended offload queue
1601 * @q: the offload queue
1603 * Services an offload Tx queue by moving packets from its Pending Send
1604 * Queue to the Hardware TX ring. The function starts and ends with the
1605 * Send Queue locked, but drops the lock while putting the skb at the
1606 * head of the Send Queue onto the Hardware TX Ring. Dropping the lock
1607 * allows more skbs to be added to the Send Queue by other threads.
1608 * The packet being processed at the head of the Pending Send Queue is
1609 * left on the queue in case we experience DMA Mapping errors, etc.
1610 * and need to give up and restart later.
1612 * service_ofldq() can be thought of as a task which opportunistically
1613 * uses other threads execution contexts. We use the Offload Queue
1614 * boolean "service_ofldq_running" to make sure that only one instance
1615 * is ever running at a time ...
1617 static void service_ofldq(struct sge_uld_txq *q)
1619 u64 *pos, *before, *end;
1621 struct sk_buff *skb;
1622 struct sge_txq *txq;
1624 unsigned int written = 0;
1625 unsigned int flits, ndesc;
1627 /* If another thread is currently in service_ofldq() processing the
1628 * Pending Send Queue then there's nothing to do. Otherwise, flag
1629 * that we're doing the work and continue. Examining/modifying
1630 * the Offload Queue boolean "service_ofldq_running" must be done
1631 * while holding the Pending Send Queue Lock.
1633 if (q->service_ofldq_running)
1635 q->service_ofldq_running = true;
1637 while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
1638 /* We drop the lock while we're working with the skb at the
1639 * head of the Pending Send Queue. This allows more skbs to
1640 * be added to the Pending Send Queue while we're working on
1641 * this one. We don't need to lock to guard the TX Ring
1642 * updates because only one thread of execution is ever
1643 * allowed into service_ofldq() at a time.
1645 spin_unlock(&q->sendq.lock);
1647 reclaim_completed_tx(q->adap, &q->q, false);
1649 flits = skb->priority; /* previously saved */
1650 ndesc = flits_to_desc(flits);
1651 credits = txq_avail(&q->q) - ndesc;
1652 BUG_ON(credits < 0);
1653 if (unlikely(credits < TXQ_STOP_THRES))
1654 ofldtxq_stop(q, skb);
1656 pos = (u64 *)&q->q.desc[q->q.pidx];
1657 if (is_ofld_imm(skb))
1658 inline_tx_skb(skb, &q->q, pos);
1659 else if (map_skb(q->adap->pdev_dev, skb,
1660 (dma_addr_t *)skb->head)) {
1662 spin_lock(&q->sendq.lock);
1665 int last_desc, hdr_len = skb_transport_offset(skb);
1667 /* The WR headers may not fit within one descriptor.
1668 * So we need to deal with wrap-around here.
1670 before = (u64 *)pos;
1671 end = (u64 *)pos + flits;
1673 pos = (void *)inline_tx_skb_header(skb, &q->q,
1676 if (before > (u64 *)pos) {
1677 left = (u8 *)end - (u8 *)txq->stat;
1678 end = (void *)txq->desc + left;
1681 /* If current position is already at the end of the
1682 * ofld queue, reset the current to point to
1683 * start of the queue and update the end ptr as well.
1685 if (pos == (u64 *)txq->stat) {
1686 left = (u8 *)end - (u8 *)txq->stat;
1687 end = (void *)txq->desc + left;
1688 pos = (void *)txq->desc;
1691 write_sgl(skb, &q->q, (void *)pos,
1693 (dma_addr_t *)skb->head);
1694 #ifdef CONFIG_NEED_DMA_MAP_STATE
1695 skb->dev = q->adap->port[0];
1696 skb->destructor = deferred_unmap_destructor;
1698 last_desc = q->q.pidx + ndesc - 1;
1699 if (last_desc >= q->q.size)
1700 last_desc -= q->q.size;
1701 q->q.sdesc[last_desc].skb = skb;
1704 txq_advance(&q->q, ndesc);
1706 if (unlikely(written > 32)) {
1707 ring_tx_db(q->adap, &q->q, written);
1711 /* Reacquire the Pending Send Queue Lock so we can unlink the
1712 * skb we've just successfully transferred to the TX Ring and
1713 * loop for the next skb which may be at the head of the
1714 * Pending Send Queue.
1716 spin_lock(&q->sendq.lock);
1717 __skb_unlink(skb, &q->sendq);
1718 if (is_ofld_imm(skb))
1721 if (likely(written))
1722 ring_tx_db(q->adap, &q->q, written);
1724 /* Indicate that no thread is processing the Pending Send Queue
1727 q->service_ofldq_running = false;
1731 * ofld_xmit - send a packet through an offload queue
1732 * @q: the Tx offload queue
1735 * Send an offload packet through an SGE offload queue.
1737 static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
1739 skb->priority = calc_tx_flits_ofld(skb); /* save for restart */
1740 spin_lock(&q->sendq.lock);
1742 /* Queue the new skb onto the Offload Queue's Pending Send Queue. If
1743 * that results in this new skb being the only one on the queue, start
1744 * servicing it. If there are other skbs already on the list, then
1745 * either the queue is currently being processed or it's been stopped
1746 * for some reason and it'll be restarted at a later time. Restart
1747 * paths are triggered by events like experiencing a DMA Mapping Error
1748 * or filling the Hardware TX Ring.
1750 __skb_queue_tail(&q->sendq, skb);
1751 if (q->sendq.qlen == 1)
1754 spin_unlock(&q->sendq.lock);
1755 return NET_XMIT_SUCCESS;
1759 * restart_ofldq - restart a suspended offload queue
1760 * @data: the offload queue to restart
1762 * Resumes transmission on a suspended Tx offload queue.
1764 static void restart_ofldq(unsigned long data)
1766 struct sge_uld_txq *q = (struct sge_uld_txq *)data;
1768 spin_lock(&q->sendq.lock);
1769 q->full = 0; /* the queue actually is completely empty now */
1771 spin_unlock(&q->sendq.lock);
1775 * skb_txq - return the Tx queue an offload packet should use
1778 * Returns the Tx queue an offload packet should use as indicated by bits
1779 * 1-15 in the packet's queue_mapping.
1781 static inline unsigned int skb_txq(const struct sk_buff *skb)
1783 return skb->queue_mapping >> 1;
1787 * is_ctrl_pkt - return whether an offload packet is a control packet
1790 * Returns whether an offload packet should use an OFLD or a CTRL
1791 * Tx queue as indicated by bit 0 in the packet's queue_mapping.
1793 static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
1795 return skb->queue_mapping & 1;
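/* ULDs typically build this encoding with set_wr_txq() from cxgb4_uld.h,
 * i.e. queue_mapping = (queue << 1) | is_ctrl.
 */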
1798 static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
1799 unsigned int tx_uld_type)
1801 struct sge_uld_txq_info *txq_info;
1802 struct sge_uld_txq *txq;
1803 unsigned int idx = skb_txq(skb);
1805 if (unlikely(is_ctrl_pkt(skb))) {
1806 /* Single ctrl queue is a requirement for LE workaround path */
1807 if (adap->tids.nsftids)
1809 return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
1812 txq_info = adap->sge.uld_txq_info[tx_uld_type];
1813 if (unlikely(!txq_info)) {
1816 return NET_XMIT_DROP;
1819 txq = &txq_info->uldtxq[idx];
1820 return ofld_xmit(txq, skb);
1824 * t4_ofld_send - send an offload packet
1825 * @adap: the adapter
1828 * Sends an offload packet. We use the packet queue_mapping to select the
1829 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1830 * should be sent as regular or control, bits 1-15 select the queue.
1832 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
1837 ret = uld_send(adap, skb, CXGB4_TX_OFLD);
1843 * cxgb4_ofld_send - send an offload packet
1844 * @dev: the net device
1847 * Sends an offload packet. This is an exported version of @t4_ofld_send,
1848 * intended for ULDs.
1850 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
1852 return t4_ofld_send(netdev2adap(dev), skb);
1854 EXPORT_SYMBOL(cxgb4_ofld_send);
1857 * t4_crypto_send - send crypto packet
1858 * @adap: the adapter
1861 * Sends crypto packet. We use the packet queue_mapping to select the
1862 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1863 * should be sent as regular or control, bits 1-15 select the queue.
1865 static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb)
1870 ret = uld_send(adap, skb, CXGB4_TX_CRYPTO);
1876 * cxgb4_crypto_send - send crypto packet
1877 * @dev: the net device
1880 * Sends crypto packet. This is an exported version of @t4_crypto_send,
1881 * intended for ULDs.
1883 int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb)
1885 return t4_crypto_send(netdev2adap(dev), skb);
1887 EXPORT_SYMBOL(cxgb4_crypto_send);
1889 static inline void copy_frags(struct sk_buff *skb,
1890 const struct pkt_gl *gl, unsigned int offset)
1894 /* usually there's just one frag */
1895 __skb_fill_page_desc(skb, 0, gl->frags[0].page,
1896 gl->frags[0].offset + offset,
1897 gl->frags[0].size - offset);
1898 skb_shinfo(skb)->nr_frags = gl->nfrags;
1899 for (i = 1; i < gl->nfrags; i++)
1900 __skb_fill_page_desc(skb, i, gl->frags[i].page,
1901 gl->frags[i].offset,
1904 /* get a reference to the last page, we don't own it */
1905 get_page(gl->frags[gl->nfrags - 1].page);
1909 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
1910 * @gl: the gather list
1911 * @skb_len: size of sk_buff main body if it carries fragments
1912 * @pull_len: amount of data to move to the sk_buff's main body
1914 * Builds an sk_buff from the given packet gather list. Returns the
1915 * sk_buff or %NULL if sk_buff allocation failed.
1917 struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
1918 unsigned int skb_len, unsigned int pull_len)
1920 struct sk_buff *skb;
1923 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
1924 * size, which is expected since buffers are at least PAGE_SIZEd.
1925 * In this case packets up to RX_COPY_THRES have only one fragment.
1927 if (gl->tot_len <= RX_COPY_THRES) {
1928 skb = dev_alloc_skb(gl->tot_len);
1931 __skb_put(skb, gl->tot_len);
1932 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1934 skb = dev_alloc_skb(skb_len);
1937 __skb_put(skb, pull_len);
1938 skb_copy_to_linear_data(skb, gl->va, pull_len);
1940 copy_frags(skb, gl, pull_len);
1941 skb->len = gl->tot_len;
1942 skb->data_len = skb->len - pull_len;
1943 skb->truesize += skb->data_len;
1947 EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
1950 * t4_pktgl_free - free a packet gather list
1951 * @gl: the gather list
1953 * Releases the pages of a packet gather list. We do not own the last
1954 * page on the list and do not free it.
1956 static void t4_pktgl_free(const struct pkt_gl *gl)
1959 const struct page_frag *p;
1961 for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
1966 * Process an MPS trace packet. Give it an unused protocol number so it won't
1967 * be delivered to anyone and send it to the stack for capture.
1969 static noinline int handle_trace_pkt(struct adapter *adap,
1970 const struct pkt_gl *gl)
1972 struct sk_buff *skb;
1974 skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
1975 if (unlikely(!skb)) {
1980 if (is_t4(adap->params.chip))
1981 __skb_pull(skb, sizeof(struct cpl_trace_pkt));
1983 __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
1985 skb_reset_mac_header(skb);
1986 skb->protocol = htons(0xffff);
1987 skb->dev = adap->port[0];
1988 netif_receive_skb(skb);
1993 * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
1994 * @adap: the adapter
1995 * @hwtstamps: time stamp structure to update
1996 * @sgetstamp: 60-bit IQE timestamp
1998 * Every ingress queue entry carries a 60-bit timestamp in Core Clock ticks;
1999 * convert that timestamp to ktime_t and assign it
2001 static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
2002 struct skb_shared_hwtstamps *hwtstamps,
2006 u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2);
2008 ns = div_u64(tmp, adap->params.vpd.cclk);
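/* cclk is the Core Clock frequency in kHz, so sgetstamp * 10^6 / cclk
 * yields nanoseconds; the cclk / 2 addend rounds to the nearest ns.
 */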
2010 memset(hwtstamps, 0, sizeof(*hwtstamps));
2011 hwtstamps->hwtstamp = ns_to_ktime(ns);
2014 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
2015 const struct cpl_rx_pkt *pkt)
2017 struct adapter *adapter = rxq->rspq.adap;
2018 struct sge *s = &adapter->sge;
2019 struct port_info *pi;
2021 struct sk_buff *skb;
2023 skb = napi_get_frags(&rxq->rspq.napi);
2024 if (unlikely(!skb)) {
2026 rxq->stats.rx_drops++;
2030 copy_frags(skb, gl, s->pktshift);
2031 skb->len = gl->tot_len - s->pktshift;
2032 skb->data_len = skb->len;
2033 skb->truesize += skb->data_len;
2034 skb->ip_summed = CHECKSUM_UNNECESSARY;
2035 skb_record_rx_queue(skb, rxq->rspq.idx);
2036 pi = netdev_priv(skb->dev);
2038 cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb),
2040 if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
2041 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
2044 if (unlikely(pkt->vlan_ex)) {
2045 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
2046 rxq->stats.vlan_ex++;
2048 ret = napi_gro_frags(&rxq->rspq.napi);
2049 if (ret == GRO_HELD)
2050 rxq->stats.lro_pkts++;
2051 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
2052 rxq->stats.lro_merged++;
2054 rxq->stats.rx_cso++;
2064 * t4_systim_to_hwstamp - read hardware time stamp
2065 * @adap: the adapter
2068 * Read the timestamp from an MPS packet and insert it into the skb, which
2069 * is then forwarded to the PTP application
2071 static noinline int t4_systim_to_hwstamp(struct adapter *adapter,
2072 struct sk_buff *skb)
2074 struct skb_shared_hwtstamps *hwtstamps;
2075 struct cpl_rx_mps_pkt *cpl = NULL;
2076 unsigned char *data;
2079 cpl = (struct cpl_rx_mps_pkt *)skb->data;
2080 if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) &
2081 X_CPL_RX_MPS_PKT_TYPE_PTP))
2082 return RX_PTP_PKT_ERR;
2084 data = skb->data + sizeof(*cpl);
2085 skb_pull(skb, 2 * sizeof(u64) + sizeof(struct cpl_rx_mps_pkt));
2086 offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN;
2087 if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short))
2088 return RX_PTP_PKT_ERR;
2090 hwtstamps = skb_hwtstamps(skb);
2091 memset(hwtstamps, 0, sizeof(*hwtstamps));
2092 hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data));
2094 return RX_PTP_PKT_SUC;
2098 * t4_rx_hststamp - Recv PTP Event Message
2099 * @adap: the adapter
2100 * @rsp: the response queue descriptor holding the RX_PKT message
2103 * If PTP is enabled and this is an MPS packet, read the HW timestamp
2105 static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp,
2106 struct sge_eth_rxq *rxq, struct sk_buff *skb)
2110 if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) &&
2111 !is_t4(adapter->params.chip))) {
2112 ret = t4_systim_to_hwstamp(adapter, skb);
2113 if (ret == RX_PTP_PKT_ERR) {
2115 rxq->stats.rx_drops++;
2119 return RX_NON_PTP_PKT;
2123 * t4_tx_hststamp - Loopback PTP Transmit Event Message
2124 * @adap: the adapter
2126 * @dev: the ingress net device
2128 * Read hardware timestamp for the loopback PTP Tx event message
2130 static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb,
2131 struct net_device *dev)
2133 struct port_info *pi = netdev_priv(dev);
2135 if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) {
2136 cxgb4_ptp_read_hwstamp(adapter, pi);
2144 * t4_ethrx_handler - process an ingress ethernet packet
2145 * @q: the response queue that received the packet
2146 * @rsp: the response queue descriptor holding the RX_PKT message
2147 * @si: the gather list of packet fragments
2149 * Process an ingress ethernet packet and deliver it to the stack.
2151 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
2152 const struct pkt_gl *si)
2155 struct sk_buff *skb;
2156 const struct cpl_rx_pkt *pkt;
2157 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
2158 struct adapter *adapter = q->adap;
2159 struct sge *s = &q->adap->sge;
2160 int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
2161 CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
2163 struct port_info *pi;
2166 if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
2167 return handle_trace_pkt(q->adap, si);
2169 pkt = (const struct cpl_rx_pkt *)rsp;
2170 /* Compressed error vector is enabled for T6 only */
2171 if (q->adap->params.tp.rx_pkt_encap)
2172 err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec));
2174 err_vec = be16_to_cpu(pkt->err_vec);
2176 csum_ok = pkt->csum_calc && !err_vec &&
2177 (q->netdev->features & NETIF_F_RXCSUM);
2178 if ((pkt->l2info & htonl(RXF_TCP_F)) &&
2179 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
2180 do_gro(rxq, si, pkt);
2184 skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
2185 if (unlikely(!skb)) {
2187 rxq->stats.rx_drops++;
2190 pi = netdev_priv(q->netdev);
2192 /* Handle PTP Event Rx packet */
2193 if (unlikely(pi->ptp_enable)) {
2194 ret = t4_rx_hststamp(adapter, rsp, rxq, skb);
2195 if (ret == RX_PTP_PKT_ERR)
2199 __skb_pull(skb, s->pktshift); /* remove ethernet header pad */
2201 /* Handle the PTP Event Tx Loopback packet */
2202 if (unlikely(pi->ptp_enable && !ret &&
2203 (pkt->l2info & htonl(RXF_UDP_F)) &&
2204 cxgb4_ptp_is_ptp_rx(skb))) {
2205 if (!t4_tx_hststamp(adapter, skb, q->netdev))
2209 skb->protocol = eth_type_trans(skb, q->netdev);
2210 skb_record_rx_queue(skb, q->idx);
2211 if (skb->dev->features & NETIF_F_RXHASH)
2212 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
2218 cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
2220 if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
2221 if (!pkt->ip_frag) {
2222 skb->ip_summed = CHECKSUM_UNNECESSARY;
2223 rxq->stats.rx_cso++;
2224 } else if (pkt->l2info & htonl(RXF_IP_F)) {
2225 __sum16 c = (__force __sum16)pkt->csum;
2226 skb->csum = csum_unfold(c);
2227 skb->ip_summed = CHECKSUM_COMPLETE;
2228 rxq->stats.rx_cso++;
2231 skb_checksum_none_assert(skb);
2232 #ifdef CONFIG_CHELSIO_T4_FCOE
2233 #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
2234 RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
2236 if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
2237 if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
2238 (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
2239 if (q->adap->params.tp.rx_pkt_encap)
2241 T6_COMPR_RXERR_SUM_F;
2243 csum_ok = err_vec & RXERR_CSUM_F;
2245 skb->ip_summed = CHECKSUM_UNNECESSARY;
2249 #undef CPL_RX_PKT_FLAGS
2250 #endif /* CONFIG_CHELSIO_T4_FCOE */
2253 if (unlikely(pkt->vlan_ex)) {
2254 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
2255 rxq->stats.vlan_ex++;
2257 skb_mark_napi_id(skb, &q->napi);
2258 netif_receive_skb(skb);
2263 * restore_rx_bufs - put back a packet's Rx buffers
2264 * @si: the packet gather list
2265 * @q: the SGE free list
2266 * @frags: number of FL buffers to restore
2268 * Puts back on an FL the Rx buffers associated with @si. The buffers
2269 * have already been unmapped and are left unmapped; we mark them so to
2270 * prevent further unmapping attempts.
2272 * This function undoes a series of @unmap_rx_buf calls when we find out
2273 * that the current packet can't be processed right away after all and we
2274 * need to come back to it later. This is a very rare event and there's
2275 * no effort to make this particularly efficient.
2277 static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
2280 struct rx_sw_desc *d;
2284 q->cidx = q->size - 1;
2287 d = &q->sdesc[q->cidx];
2288 d->page = si->frags[frags].page;
2289 d->dma_addr |= RX_UNMAPPED_BUF;
2295 * is_new_response - check if a response is newly written
2296 * @r: the response descriptor
2297 * @q: the response queue
2299 * Returns true if a response descriptor contains a yet unprocessed
2300 * response.
2302 static inline bool is_new_response(const struct rsp_ctrl *r,
2303 const struct sge_rspq *q)
2305 return (r->type_gen >> RSPD_GEN_S) == q->gen;
2309 * rspq_next - advance to the next entry in a response queue
2312 * Updates the state of a response queue to advance it to the next entry.
2314 static inline void rspq_next(struct sge_rspq *q)
2316 q->cur_desc = (void *)q->cur_desc + q->iqe_len;
2317 if (unlikely(++q->cidx == q->size)) {
2320 q->cur_desc = q->desc;
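/* Illustrative sketch (not part of the driver): how the generation bit used
 * by is_new_response()/rspq_next() lets the consumer detect freshly written
 * entries without a shared producer index.  Hardware stamps each response
 * with the queue's current generation; the driver keeps its own copy in
 * q->gen and toggles it when cidx wraps, so descriptors left over from the
 * previous pass around the ring compare unequal.  The helper and its
 * per-entry callback are assumptions made up for this sketch.
 */
static bool __maybe_unused example_poll_one_response(struct sge_rspq *q,
						     void (*handle)(const void *desc))
{
	const struct rsp_ctrl *rc;

	rc = (const void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
	if (!is_new_response(rc, q))
		return false;		/* nothing new has been written yet */

	dma_rmb();			/* read the body only after the GEN bit */
	handle(q->cur_desc);
	rspq_next(q);			/* advance cidx; a wrap toggles q->gen */
	return true;
}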
2325 * process_responses - process responses from an SGE response queue
2326 * @q: the ingress queue to process
2327 * @budget: how many responses can be processed in this round
2329 * Process responses from an SGE response queue up to the supplied budget.
2330 * Responses include received packets as well as control messages from FW
2333 * Additionally choose the interrupt holdoff time for the next interrupt
2334 * on this queue. If the system is under memory shortage use a fairly
2335 * long delay to help recovery.
2337 static int process_responses(struct sge_rspq *q, int budget)
2340 int budget_left = budget;
2341 const struct rsp_ctrl *rc;
2342 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
2343 struct adapter *adapter = q->adap;
2344 struct sge *s = &adapter->sge;
2346 while (likely(budget_left)) {
2347 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
2348 if (!is_new_response(rc, q)) {
2349 if (q->flush_handler)
2350 q->flush_handler(q);
2355 rsp_type = RSPD_TYPE_G(rc->type_gen);
2356 if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
2357 struct page_frag *fp;
2359 const struct rx_sw_desc *rsd;
2360 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
2362 if (len & RSPD_NEWBUF_F) {
2363 if (likely(q->offset > 0)) {
2364 free_rx_bufs(q->adap, &rxq->fl, 1);
2367 len = RSPD_LEN_G(len);
2371 /* gather packet fragments */
2372 for (frags = 0, fp = si.frags; ; frags++, fp++) {
2373 rsd = &rxq->fl.sdesc[rxq->fl.cidx];
2374 bufsz = get_buf_size(adapter, rsd);
2375 fp->page = rsd->page;
2376 fp->offset = q->offset;
2377 fp->size = min(bufsz, len);
2381 unmap_rx_buf(q->adap, &rxq->fl);
2384 si.sgetstamp = SGE_TIMESTAMP_G(
2385 be64_to_cpu(rc->last_flit));
2387 * Last buffer remains mapped so explicitly make it
2388 * coherent for CPU access.
2390 dma_sync_single_for_cpu(q->adap->pdev_dev,
2392 fp->size, DMA_FROM_DEVICE);
2394 si.va = page_address(si.frags[0].page) +
2398 si.nfrags = frags + 1;
2399 ret = q->handler(q, q->cur_desc, &si);
2400 if (likely(ret == 0))
2401 q->offset += ALIGN(fp->size, s->fl_align);
2403 restore_rx_bufs(&si, &rxq->fl, frags);
2404 } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
2405 ret = q->handler(q, q->cur_desc, NULL);
2407 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
2410 if (unlikely(ret)) {
2411 /* couldn't process descriptor, back off for recovery */
2412 q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
2420 if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
2421 __refill_fl(q->adap, &rxq->fl);
2422 return budget - budget_left;
2426 * napi_rx_handler - the NAPI handler for Rx processing
2427 * @napi: the napi instance
2428 * @budget: how many packets we can process in this round
2430 * Handler for new data events when using NAPI. This does not need any
2431 * locking or protection from interrupts as data interrupts are off at
2432 * this point and other adapter interrupts do not interfere (the latter
2433 * is not a concern at all with MSI-X as non-data interrupts then have
2434 * a separate handler).
2436 static int napi_rx_handler(struct napi_struct *napi, int budget)
2438 unsigned int params;
2439 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
2443 work_done = process_responses(q, budget);
2444 if (likely(work_done < budget)) {
2447 napi_complete_done(napi, work_done);
2448 timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
2450 if (q->adaptive_rx) {
2451 if (work_done > max(timer_pkt_quota[timer_index],
2453 timer_index = (timer_index + 1);
2455 timer_index = timer_index - 1;
2457 timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
2458 q->next_intr_params =
2459 QINTR_TIMER_IDX_V(timer_index) |
2461 params = q->next_intr_params;
2463 params = q->next_intr_params;
2464 q->next_intr_params = q->intr_params;
2467 params = QINTR_TIMER_IDX_V(7);
2469 val = CIDXINC_V(work_done) | SEINTARM_V(params);
2471 /* If we don't have access to the new User GTS (T5+), use the old
2472 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2474 if (unlikely(q->bar2_addr == NULL)) {
2475 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
2476 val | INGRESSQID_V((u32)q->cntxt_id));
2478 writel(val | INGRESSQID_V(q->bar2_qid),
2479 q->bar2_addr + SGE_UDB_GTS);
2486 * The MSI-X interrupt handler for an SGE response queue.
2488 irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
2490 struct sge_rspq *q = cookie;
2492 napi_schedule(&q->napi);
2497 * Process the indirect interrupt entries in the interrupt queue and kick off
2498 * NAPI for each queue that has generated an entry.
2500 static unsigned int process_intrq(struct adapter *adap)
2502 unsigned int credits;
2503 const struct rsp_ctrl *rc;
2504 struct sge_rspq *q = &adap->sge.intrq;
2507 spin_lock(&adap->sge.intrq_lock);
2508 for (credits = 0; ; credits++) {
2509 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
2510 if (!is_new_response(rc, q))
2514 if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
2515 unsigned int qid = ntohl(rc->pldbuflen_qid);
2517 qid -= adap->sge.ingr_start;
2518 napi_schedule(&adap->sge.ingr_map[qid]->napi);
2524 val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
2526 /* If we don't have access to the new User GTS (T5+), use the old
2527 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2529 if (unlikely(q->bar2_addr == NULL)) {
2530 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
2531 val | INGRESSQID_V(q->cntxt_id));
2533 writel(val | INGRESSQID_V(q->bar2_qid),
2534 q->bar2_addr + SGE_UDB_GTS);
2537 spin_unlock(&adap->sge.intrq_lock);
2542 * The MSI interrupt handler, which handles data events from SGE response queues
2543 * as well as error and other async events as they all use the same MSI vector.
2545 static irqreturn_t t4_intr_msi(int irq, void *cookie)
2547 struct adapter *adap = cookie;
2549 if (adap->flags & MASTER_PF)
2550 t4_slow_intr_handler(adap);
2551 process_intrq(adap);
2556 * Interrupt handler for legacy INTx interrupts.
2557 * Handles data events from SGE response queues as well as error and other
2558 * async events as they all use the same interrupt line.
2560 static irqreturn_t t4_intr_intx(int irq, void *cookie)
2562 struct adapter *adap = cookie;
2564 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
2565 if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) |
2566 process_intrq(adap))
2568 return IRQ_NONE; /* probably shared interrupt */
2572 * t4_intr_handler - select the top-level interrupt handler
2573 * @adap: the adapter
2575 * Selects the top-level interrupt handler based on the type of interrupts
2576 * (MSI-X, MSI, or INTx).
2578 irq_handler_t t4_intr_handler(struct adapter *adap)
2580 if (adap->flags & USING_MSIX)
2581 return t4_sge_intr_msix;
2582 if (adap->flags & USING_MSI)
2583 return t4_intr_msi;
2584 return t4_intr_intx;
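/* Hedged usage sketch: registering the handler chosen by t4_intr_handler()
 * for the non-MSI-X case.  The interrupt name and the IRQF_SHARED choice for
 * legacy INTx are illustrative assumptions, not values taken from this file.
 */
static int __maybe_unused example_request_nondata_irq(struct adapter *adap)
{
	unsigned long flags = (adap->flags & USING_MSI) ? 0 : IRQF_SHARED;

	return request_irq(adap->pdev->irq, t4_intr_handler(adap), flags,
			   "cxgb4-example", adap);
}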
2587 static void sge_rx_timer_cb(unsigned long data)
2591 struct adapter *adap = (struct adapter *)data;
2592 struct sge *s = &adap->sge;
2594 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
2595 for (m = s->starving_fl[i]; m; m &= m - 1) {
2596 struct sge_eth_rxq *rxq;
2597 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2598 struct sge_fl *fl = s->egr_map[id];
2600 clear_bit(id, s->starving_fl);
2601 smp_mb__after_atomic();
2603 if (fl_starving(adap, fl)) {
2604 rxq = container_of(fl, struct sge_eth_rxq, fl);
2605 if (napi_reschedule(&rxq->rspq.napi))
2608 set_bit(id, s->starving_fl);
2611 /* The remainder of the SGE RX Timer Callback routine is dedicated to
2612 * global Master PF activities like checking for chip ingress stalls, etc.
2615 if (!(adap->flags & MASTER_PF))
2618 t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
2621 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2624 static void sge_tx_timer_cb(unsigned long data)
2627 unsigned int i, budget;
2628 struct adapter *adap = (struct adapter *)data;
2629 struct sge *s = &adap->sge;
2631 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
2632 for (m = s->txq_maperr[i]; m; m &= m - 1) {
2633 unsigned long id = __ffs(m) + i * BITS_PER_LONG;
2634 struct sge_uld_txq *txq = s->egr_map[id];
2636 clear_bit(id, s->txq_maperr);
2637 tasklet_schedule(&txq->qresume_tsk);
2640 if (!is_t4(adap->params.chip)) {
2641 struct sge_eth_txq *q = &s->ptptxq;
2644 spin_lock(&adap->ptp_lock);
2645 avail = reclaimable(&q->q);
2648 free_tx_desc(adap, &q->q, avail, false);
2649 q->q.in_use -= avail;
2651 spin_unlock(&adap->ptp_lock);
2654 budget = MAX_TIMER_TX_RECLAIM;
2655 i = s->ethtxq_rover;
2657 struct sge_eth_txq *q = &s->ethtxq[i];
2660 time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
2661 __netif_tx_trylock(q->txq)) {
2662 int avail = reclaimable(&q->q);
2668 free_tx_desc(adap, &q->q, avail, true);
2669 q->q.in_use -= avail;
2672 __netif_tx_unlock(q->txq);
2675 if (++i >= s->ethqsets)
2677 } while (budget && i != s->ethtxq_rover);
2678 s->ethtxq_rover = i;
2679 mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2683 * bar2_address - return the BAR2 address for an SGE Queue's Registers
2684 * @adapter: the adapter
2685 * @qid: the SGE Queue ID
2686 * @qtype: the SGE Queue Type (Egress or Ingress)
2687 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
2689 * Returns the BAR2 address for the SGE Queue Registers associated with
2690 * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
2691 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
2692 * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
2693 * Registers are supported (e.g. the Write Combining Doorbell Buffer).
2695 static void __iomem *bar2_address(struct adapter *adapter,
2697 enum t4_bar2_qtype qtype,
2698 unsigned int *pbar2_qid)
2703 ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0,
2704 &bar2_qoffset, pbar2_qid);
2708 return adapter->bar2 + bar2_qoffset;
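/* Hedged sketch of how a bar2_address() mapping is consumed: arming the GTS
 * (interrupt/holdoff) doorbell of an ingress queue through its BAR2 mapping,
 * or through the per-PF GTS register when no BAR2 mapping is available (as
 * on T4).  This simply mirrors the pattern used by napi_rx_handler() and
 * process_intrq() above; the helper itself is illustrative only.
 */
static void __maybe_unused example_arm_gts(struct adapter *adap,
					   struct sge_rspq *q,
					   unsigned int cidx_inc,
					   unsigned int intr_params)
{
	u32 val = CIDXINC_V(cidx_inc) | SEINTARM_V(intr_params);

	if (unlikely(q->bar2_addr == NULL))
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     val | INGRESSQID_V((u32)q->cntxt_id));
	else
		writel(val | INGRESSQID_V(q->bar2_qid),
		       q->bar2_addr + SGE_UDB_GTS);
}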
2711 /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
2712 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
2714 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2715 struct net_device *dev, int intr_idx,
2716 struct sge_fl *fl, rspq_handler_t hnd,
2717 rspq_flush_handler_t flush_hnd, int cong)
2721 struct sge *s = &adap->sge;
2722 struct port_info *pi = netdev_priv(dev);
2723 int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING);
2725 /* Size needs to be multiple of 16, including status entry. */
2726 iq->size = roundup(iq->size, 16);
2728 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
2729 &iq->phys_addr, NULL, 0,
2730 dev_to_node(adap->pdev_dev));
2734 memset(&c, 0, sizeof(c));
2735 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
2736 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2737 FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
2738 c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
2740 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
2741 FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
2742 FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
2743 FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
2744 FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
2746 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
2747 FW_IQ_CMD_IQGTSMODE_F |
2748 FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
2749 FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
2750 c.iqsize = htons(iq->size);
2751 c.iqaddr = cpu_to_be64(iq->phys_addr);
2753 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
2756 enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
2758 /* Allocate the ring for the hardware free list (with space
2759 * for its status page) along with the associated software
2760 * descriptor ring. The free list size needs to be a multiple
2761 * of the Egress Queue Unit and at least 2 Egress Units larger
2762 * than the SGE's Egress Congestion Threshold
2763 * (fl_starve_thres - 1).
2765 if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
2766 fl->size = s->fl_starve_thres - 1 + 2 * 8;
2767 fl->size = roundup(fl->size, 8);
2768 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
2769 sizeof(struct rx_sw_desc), &fl->addr,
2770 &fl->sdesc, s->stat_len,
2771 dev_to_node(adap->pdev_dev));
2775 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
2776 c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
2777 FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
2778 FW_IQ_CMD_FL0DATARO_V(relaxed) |
2779 FW_IQ_CMD_FL0PADEN_F);
2781 c.iqns_to_fl0congen |=
2782 htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
2783 FW_IQ_CMD_FL0CONGCIF_F |
2784 FW_IQ_CMD_FL0CONGEN_F);
2785 /* In T6, for egress queue type FL there is internal overhead
2786 * of 16B for header going into FLM module. Hence the maximum
2787 * allowed burst size is 448 bytes. For T4/T5, the hardware
2788 * doesn't coalesce fetch requests if more than 64 bytes of
2789 * Free List pointers are provided, so we use a 128-byte Fetch
2790 * Burst Minimum there (T6 implements coalescing so we can use
2791 * the smaller 64-byte value there).
2793 c.fl0dcaen_to_fl0cidxfthresh =
2794 htons(FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ?
2795 FETCHBURSTMIN_128B_X :
2796 FETCHBURSTMIN_64B_X) |
2797 FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
2798 FETCHBURSTMAX_512B_X :
2799 FETCHBURSTMAX_256B_X));
2800 c.fl0size = htons(flsz);
2801 c.fl0addr = cpu_to_be64(fl->addr);
2804 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
2808 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
2809 iq->cur_desc = iq->desc;
2812 iq->next_intr_params = iq->intr_params;
2813 iq->cntxt_id = ntohs(c.iqid);
2814 iq->abs_id = ntohs(c.physiqid);
2815 iq->bar2_addr = bar2_address(adap,
2817 T4_BAR2_QTYPE_INGRESS,
2819 iq->size--; /* subtract status entry */
2822 iq->flush_handler = flush_hnd;
2824 memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr));
2825 skb_queue_head_init(&iq->lro_mgr.lroq);
2827 /* set offset to -1 to distinguish ingress queues without FL */
2828 iq->offset = fl ? 0 : -1;
2830 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
2833 fl->cntxt_id = ntohs(c.fl0id);
2834 fl->avail = fl->pend_cred = 0;
2835 fl->pidx = fl->cidx = 0;
2836 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
2837 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
2839 /* Note, we must initialize the BAR2 Free List User Doorbell
2840 * information before refilling the Free List!
2842 fl->bar2_addr = bar2_address(adap,
2844 T4_BAR2_QTYPE_EGRESS,
2846 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
2849 /* For T5 and later we attempt to set up the Congestion Manager values
2850 * of the new RX Ethernet Queue. This should really be handled by
2851 * firmware because it's more complex than any host driver wants to
2852 * get involved with and it's different per chip and this is almost
2853 * certainly wrong. Firmware would be wrong as well, but it would be
2854 * a lot easier to fix in one place ... For now we do something very
2855 * simple (and hopefully less wrong).
2857 if (!is_t4(adap->params.chip) && cong >= 0) {
2858 u32 param, val, ch_map = 0;
2860 u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;
2862 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
2863 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
2864 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id));
2866 val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X);
2869 CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
2870 for (i = 0; i < 4; i++) {
2871 if (cong & (1 << i))
2872 ch_map |= 1 << (i << cng_ch_bits_log);
2874 val |= CONMCTXT_CNGCHMAP_V(ch_map);
2876 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
2879 dev_warn(adap->pdev_dev, "Failed to set Congestion"
2880 " Manager Context for Ingress Queue %d: %d\n",
2881 iq->cntxt_id, -ret);
2890 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
2891 iq->desc, iq->phys_addr);
2894 if (fl && fl->desc) {
2897 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
2898 fl->desc, fl->addr);
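/* Hedged usage sketch for t4_sge_alloc_rxq(): allocating an Ethernet Rx
 * queue set with a Free List, following the @intr_idx and @cong conventions
 * documented above.  It assumes the caller has already filled in the rspq
 * parameters (size, iqe_len, interrupt params), e.g. via the main driver's
 * init_rspq(); the congestion map and MSI-X vector below are illustrative.
 */
static int __maybe_unused example_alloc_eth_rxq(struct adapter *adap,
						struct net_device *dev,
						struct sge_eth_rxq *q,
						int msix_vec)
{
	struct port_info *pi = netdev_priv(dev);
	int cong = 1 << pi->tx_chan;	/* single-channel congestion map */

	return t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
				msix_vec,		/* >= 0: direct MSI-X vector */
				&q->fl,			/* also allocate a Free List */
				t4_ethrx_handler,	/* Rx packet handler above */
				NULL,			/* no LRO flush handler */
				cong);
}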
2904 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2907 q->bar2_addr = bar2_address(adap,
2909 T4_BAR2_QTYPE_EGRESS,
2912 q->cidx = q->pidx = 0;
2913 q->stops = q->restarts = 0;
2914 q->stat = (void *)&q->desc[q->size];
2915 spin_lock_init(&q->db_lock);
2916 adap->sge.egr_map[id - adap->sge.egr_start] = q;
2919 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2920 struct net_device *dev, struct netdev_queue *netdevq,
2924 struct fw_eq_eth_cmd c;
2925 struct sge *s = &adap->sge;
2926 struct port_info *pi = netdev_priv(dev);
2928 /* Add status entries */
2929 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2931 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2932 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2933 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2934 netdev_queue_numa_node_read(netdevq));
2938 memset(&c, 0, sizeof(c));
2939 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
2940 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2941 FW_EQ_ETH_CMD_PFN_V(adap->pf) |
2942 FW_EQ_ETH_CMD_VFN_V(0));
2943 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
2944 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
2945 c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
2946 FW_EQ_ETH_CMD_VIID_V(pi->viid));
2947 c.fetchszm_to_iqid =
2948 htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
2949 FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
2950 FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
2952 htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
2953 FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
2954 FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
2955 FW_EQ_ETH_CMD_EQSIZE_V(nentries));
2956 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2958 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
2960 kfree(txq->q.sdesc);
2961 txq->q.sdesc = NULL;
2962 dma_free_coherent(adap->pdev_dev,
2963 nentries * sizeof(struct tx_desc),
2964 txq->q.desc, txq->q.phys_addr);
2969 txq->q.q_type = CXGB4_TXQ_ETH;
2970 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
2972 txq->tso = txq->tx_cso = txq->vlan_ins = 0;
2973 txq->mapping_err = 0;
2977 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2978 struct net_device *dev, unsigned int iqid,
2979 unsigned int cmplqid)
2982 struct fw_eq_ctrl_cmd c;
2983 struct sge *s = &adap->sge;
2984 struct port_info *pi = netdev_priv(dev);
2986 /* Add status entries */
2987 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2989 txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
2990 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
2991 NULL, 0, dev_to_node(adap->pdev_dev));
2995 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
2996 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2997 FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
2998 FW_EQ_CTRL_CMD_VFN_V(0));
2999 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
3000 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
3001 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
3002 c.physeqid_pkd = htonl(0);
3003 c.fetchszm_to_iqid =
3004 htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
3005 FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
3006 FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
3008 htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
3009 FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
3010 FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
3011 FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
3012 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
3014 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
3016 dma_free_coherent(adap->pdev_dev,
3017 nentries * sizeof(struct tx_desc),
3018 txq->q.desc, txq->q.phys_addr);
3023 txq->q.q_type = CXGB4_TXQ_CTRL;
3024 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
3026 skb_queue_head_init(&txq->sendq);
3027 tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
3032 int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
3033 unsigned int cmplqid)
3037 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
3038 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
3039 FW_PARAMS_PARAM_YZ_V(eqid));
3041 return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
3044 int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
3045 struct net_device *dev, unsigned int iqid,
3046 unsigned int uld_type)
3049 struct fw_eq_ofld_cmd c;
3050 struct sge *s = &adap->sge;
3051 struct port_info *pi = netdev_priv(dev);
3052 int cmd = FW_EQ_OFLD_CMD;
3054 /* Add status entries */
3055 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
3057 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
3058 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
3059 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
3064 memset(&c, 0, sizeof(c));
3065 if (unlikely(uld_type == CXGB4_TX_CRYPTO))
3066 cmd = FW_EQ_CTRL_CMD;
3067 c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
3068 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
3069 FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
3070 FW_EQ_OFLD_CMD_VFN_V(0));
3071 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
3072 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
3073 c.fetchszm_to_iqid =
3074 htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
3075 FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
3076 FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
3078 htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
3079 FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
3080 FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
3081 FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
3082 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
3084 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
3086 kfree(txq->q.sdesc);
3087 txq->q.sdesc = NULL;
3088 dma_free_coherent(adap->pdev_dev,
3089 nentries * sizeof(struct tx_desc),
3090 txq->q.desc, txq->q.phys_addr);
3095 txq->q.q_type = CXGB4_TXQ_ULD;
3096 init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
3098 skb_queue_head_init(&txq->sendq);
3099 tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
3101 txq->mapping_err = 0;
3105 void free_txq(struct adapter *adap, struct sge_txq *q)
3107 struct sge *s = &adap->sge;
3109 dma_free_coherent(adap->pdev_dev,
3110 q->size * sizeof(struct tx_desc) + s->stat_len,
3111 q->desc, q->phys_addr);
3117 void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
3120 struct sge *s = &adap->sge;
3121 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
3123 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
3124 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
3125 rq->cntxt_id, fl_id, 0xffff);
3126 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
3127 rq->desc, rq->phys_addr);
3128 netif_napi_del(&rq->napi);
3130 rq->cntxt_id = rq->abs_id = 0;
3134 free_rx_bufs(adap, fl, fl->avail);
3135 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
3136 fl->desc, fl->addr);
3145 * t4_free_ofld_rxqs - free a block of consecutive Rx queues
3146 * @adap: the adapter
3147 * @n: number of queues
3148 * @q: pointer to first queue
3150 * Release the resources of a consecutive block of offload Rx queues.
3152 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
3154 for ( ; n; n--, q++)
3156 free_rspq_fl(adap, &q->rspq,
3157 q->fl.size ? &q->fl : NULL);
3161 * t4_free_sge_resources - free SGE resources
3162 * @adap: the adapter
3164 * Frees resources used by the SGE queue sets.
3166 void t4_free_sge_resources(struct adapter *adap)
3169 struct sge_eth_rxq *eq;
3170 struct sge_eth_txq *etq;
3172 /* stop all Rx queues in order to start them draining */
3173 for (i = 0; i < adap->sge.ethqsets; i++) {
3174 eq = &adap->sge.ethrxq[i];
3176 t4_iq_stop(adap, adap->mbox, adap->pf, 0,
3177 FW_IQ_TYPE_FL_INT_CAP,
3179 eq->fl.size ? eq->fl.cntxt_id : 0xffff,
3183 /* clean up Ethernet Tx/Rx queues */
3184 for (i = 0; i < adap->sge.ethqsets; i++) {
3185 eq = &adap->sge.ethrxq[i];
3187 free_rspq_fl(adap, &eq->rspq,
3188 eq->fl.size ? &eq->fl : NULL);
3190 etq = &adap->sge.ethtxq[i];
3192 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
3194 __netif_tx_lock_bh(etq->txq);
3195 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
3196 __netif_tx_unlock_bh(etq->txq);
3197 kfree(etq->q.sdesc);
3198 free_txq(adap, &etq->q);
3202 /* clean up control Tx queues */
3203 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
3204 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
3207 tasklet_kill(&cq->qresume_tsk);
3208 t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
3210 __skb_queue_purge(&cq->sendq);
3211 free_txq(adap, &cq->q);
3215 if (adap->sge.fw_evtq.desc)
3216 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
3218 if (adap->sge.intrq.desc)
3219 free_rspq_fl(adap, &adap->sge.intrq, NULL);
3221 if (!is_t4(adap->params.chip)) {
3222 etq = &adap->sge.ptptxq;
3224 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
3226 spin_lock_bh(&adap->ptp_lock);
3227 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
3228 spin_unlock_bh(&adap->ptp_lock);
3229 kfree(etq->q.sdesc);
3230 free_txq(adap, &etq->q);
3234 /* clear the reverse egress queue map */
3235 memset(adap->sge.egr_map, 0,
3236 adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
3239 void t4_sge_start(struct adapter *adap)
3241 adap->sge.ethtxq_rover = 0;
3242 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
3243 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
3247 * t4_sge_stop - disable SGE operation
3248 * @adap: the adapter
3250 * Stop tasklets and timers associated with the DMA engine. Note that
3251 * this is effective only if measures have been taken to disable any HW
3252 * events that may restart them.
3254 void t4_sge_stop(struct adapter *adap)
3257 struct sge *s = &adap->sge;
3259 if (in_interrupt()) /* actions below require waiting */
3262 if (s->rx_timer.function)
3263 del_timer_sync(&s->rx_timer);
3264 if (s->tx_timer.function)
3265 del_timer_sync(&s->tx_timer);
3267 if (is_offload(adap)) {
3268 struct sge_uld_txq_info *txq_info;
3270 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
3272 struct sge_uld_txq *txq = txq_info->uldtxq;
3274 for_each_ofldtxq(&adap->sge, i) {
3276 tasklet_kill(&txq->qresume_tsk);
3281 if (is_pci_uld(adap)) {
3282 struct sge_uld_txq_info *txq_info;
3284 txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
3286 struct sge_uld_txq *txq = txq_info->uldtxq;
3288 for_each_ofldtxq(&adap->sge, i) {
3290 tasklet_kill(&txq->qresume_tsk);
3295 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
3296 struct sge_ctrl_txq *cq = &s->ctrlq[i];
3299 tasklet_kill(&cq->qresume_tsk);
3304 * t4_sge_init_soft - grab core SGE values needed by SGE code
3305 * @adap: the adapter
3307 * Grab the SGE operating parameters that we need to do our job and
3308 * make sure we can live with them.
3311 static int t4_sge_init_soft(struct adapter *adap)
3313 struct sge *s = &adap->sge;
3314 u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
3315 u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
3316 u32 ingress_rx_threshold;
3319 * Verify that CPL messages are going to the Ingress Queue for
3320 * process_responses() and that only packet data is going to the
3321 * Free Lists.
3323 if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
3324 RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
3325 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
3330 * Validate the Host Buffer Register Array indices that we want to
3333 * XXX Note that we should really read through the Host Buffer Size
3334 * XXX register array and find the indices of the Buffer Sizes which
3335 * XXX meet our needs!
3337 #define READ_FL_BUF(x) \
3338 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
3340 fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
3341 fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
3342 fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
3343 fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
3345 /* We only bother using the Large Page logic if the Large Page Buffer
3346 * is larger than our Page Size Buffer.
3348 if (fl_large_pg <= fl_small_pg)
3353 /* The Page Size Buffer must be exactly equal to our Page Size and the
3354 * Large Page Size Buffer should be 0 (per above) or a power of 2.
3356 if (fl_small_pg != PAGE_SIZE ||
3357 (fl_large_pg & (fl_large_pg-1)) != 0) {
3358 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
3359 fl_small_pg, fl_large_pg);
3363 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
3365 if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
3366 fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
3367 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
3368 fl_small_mtu, fl_large_mtu);
3373 * Retrieve our RX interrupt holdoff timer values and counter
3374 * threshold values from the SGE parameters.
3376 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
3377 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
3378 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
3379 s->timer_val[0] = core_ticks_to_us(adap,
3380 TIMERVALUE0_G(timer_value_0_and_1));
3381 s->timer_val[1] = core_ticks_to_us(adap,
3382 TIMERVALUE1_G(timer_value_0_and_1));
3383 s->timer_val[2] = core_ticks_to_us(adap,
3384 TIMERVALUE2_G(timer_value_2_and_3));
3385 s->timer_val[3] = core_ticks_to_us(adap,
3386 TIMERVALUE3_G(timer_value_2_and_3));
3387 s->timer_val[4] = core_ticks_to_us(adap,
3388 TIMERVALUE4_G(timer_value_4_and_5));
3389 s->timer_val[5] = core_ticks_to_us(adap,
3390 TIMERVALUE5_G(timer_value_4_and_5));
3392 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
3393 s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
3394 s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
3395 s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
3396 s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
3402 * t4_sge_init - initialize SGE
3403 * @adap: the adapter
3405 * Perform low-level SGE code initialization needed every time after a
3406 * chip reset.
3408 int t4_sge_init(struct adapter *adap)
3410 struct sge *s = &adap->sge;
3411 u32 sge_control, sge_conm_ctrl;
3412 int ret, egress_threshold;
3415 * Ingress Padding Boundary and Egress Status Page Size are set up by
3416 * t4_fixup_host_params().
3418 sge_control = t4_read_reg(adap, SGE_CONTROL_A);
3419 s->pktshift = PKTSHIFT_G(sge_control);
3420 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
3422 s->fl_align = t4_fl_pkt_align(adap);
3423 ret = t4_sge_init_soft(adap);
3428 * A FL with <= fl_starve_thres buffers is starving and a periodic
3429 * timer will attempt to refill it. This needs to be larger than the
3430 * SGE's Egress Congestion Threshold. If it isn't, then we can get
3431 * stuck waiting for new packets while the SGE is waiting for us to
3432 * give it more Free List entries. (Note that the SGE's Egress
3433 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
3434 * there was only a single field to control this. For T5 there's the
3435 * original field which now only applies to Unpacked Mode Free List
3436 * buffers and a new field which only applies to Packed Mode Free List
3437 * buffers.
3439 sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
3440 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
3442 egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
3445 egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
3448 egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
3451 dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
3452 CHELSIO_CHIP_VERSION(adap->params.chip));
3455 s->fl_starve_thres = 2*egress_threshold + 1;
3457 t4_idma_monitor_init(adap, &s->idma_monitor);
3459 /* Set up timers used for recurring callbacks to process RX and TX
3460 * administrative tasks.
3462 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
3463 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
3465 spin_lock_init(&s->intrq_lock);