/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "workarounds.h"
#include "ef10_regs.h"

#ifdef EFX_USE_PIO

#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;

#endif /* EFX_USE_PIO */
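
/* Added commentary (illustrative): ALIGN() rounds the 256-byte default up
 * to a whole number of cache lines, so with 64-byte cache lines the value
 * stays at 256.  Packets no longer than efx_piobuf_size are candidates for
 * PIO transmission in efx_enqueue_skb() below.
 */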

static inline unsigned int
efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
{
        return tx_queue->insert_count & tx_queue->ptr_mask;
}

static inline struct efx_tx_buffer *
__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
{
        return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
}

static inline struct efx_tx_buffer *
efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer =
                __efx_tx_queue_get_insert_buffer(tx_queue);

        EFX_BUG_ON_PARANOID(buffer->len);
        EFX_BUG_ON_PARANOID(buffer->flags);
        EFX_BUG_ON_PARANOID(buffer->unmap_len);

        return buffer;
}
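
/* Added commentary (illustrative): the ring size is a power of two, so the
 * free-running insert_count only needs masking when used as an index.  With
 * a 1024-entry ring, ptr_mask == 0x3ff and insert_count == 0x1405 maps to
 * slot 0x1405 & 0x3ff == 0x005; wrap-around needs no modulo or branch.
 */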

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                               struct efx_tx_buffer *buffer,
                               unsigned int *pkts_compl,
                               unsigned int *bytes_compl)
{
        if (buffer->unmap_len) {
                struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
                dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
                if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
                                       DMA_TO_DEVICE);
                buffer->unmap_len = 0;
        }

        if (buffer->flags & EFX_TX_BUF_SKB) {
                (*pkts_compl)++;
                (*bytes_compl) += buffer->skb->len;
                dev_consume_skb_any((struct sk_buff *)buffer->skb);
                netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
                           "TX queue %d transmission id %x complete\n",
                           tx_queue->queue, tx_queue->read_count);
        } else if (buffer->flags & EFX_TX_BUF_HEAP) {
                kfree(buffer->heap_buf);
        }

        buffer->len = 0;
        buffer->flags = 0;
}

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb);

static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
        /* Depending on the NIC revision, we can use descriptor
         * lengths up to 8K or 8K-1.  However, since PCI Express
         * devices must split read requests at 4K boundaries, there is
         * little benefit from using descriptors that cross those
         * boundaries and we keep things simple by not doing so.
         */
        unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

        /* Work around hardware bug for unaligned buffers. */
        if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
                len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

        return len;
}
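
/* Worked example (added commentary, assuming EFX_PAGE_SIZE == 4096): a
 * fragment at dma_addr == 0x1f00 within a page can extend at most
 * (~0x1f00 & 0xfff) + 1 == 0x100 bytes before crossing the 4K boundary,
 * so a longer fragment is split into a 0x100-byte descriptor followed by
 * further descriptors for the remainder.
 */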

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
        /* Header and payload descriptor for each output segment, plus
         * one for every input fragment boundary within a segment
         */
        unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

        /* Possibly one more per segment for the alignment workaround,
         * or for option descriptors
         */
        if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
                max_descs += EFX_TSO_MAX_SEGS;

        /* Possibly more for PCIe page boundaries within input fragments */
        if (PAGE_SIZE > EFX_PAGE_SIZE)
                max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
                                   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

        return max_descs;
}
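
/* Worked example (added commentary, assuming EFX_TSO_MAX_SEGS == 100 and
 * MAX_SKB_FRAGS == 17): the baseline is 100 * 2 + 17 == 217 descriptors,
 * plus 100 more on EF10 for option descriptors, i.e. 317.  This bound on
 * a worst-case TSO skb is what the queue stop threshold is derived from.
 */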

static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
        /* We need to consider both queues that the net core sees as one */
        struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
        struct efx_nic *efx = txq1->efx;
        unsigned int fill_level;

        fill_level = max(txq1->insert_count - txq1->old_read_count,
                         txq2->insert_count - txq2->old_read_count);
        if (likely(fill_level < efx->txq_stop_thresh))
                return;

        /* We used the stale old_read_count above, which gives us a
         * pessimistic estimate of the fill level (which may even
         * validly be >= efx->txq_entries).  Now try again using
         * read_count (more likely to be a cache miss).
         *
         * If we read read_count and then conditionally stop the
         * queue, it is possible for the completion path to race with
         * us and complete all outstanding descriptors in the middle,
         * after which there will be no more completions to wake it.
         * Therefore we stop the queue first, then read read_count
         * (with a memory barrier to ensure the ordering), then
         * restart the queue if the fill level turns out to be low
         * enough.
         */
        netif_tx_stop_queue(txq1->core_txq);
        smp_mb();
        txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
        txq2->old_read_count = ACCESS_ONCE(txq2->read_count);

        fill_level = max(txq1->insert_count - txq1->old_read_count,
                         txq2->insert_count - txq2->old_read_count);
        EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
        if (likely(fill_level < efx->txq_stop_thresh)) {
                smp_mb();
                if (likely(!efx->loopback_selftest))
                        netif_tx_start_queue(txq1->core_txq);
        }
}

#ifdef EFX_USE_PIO

struct efx_short_copy_buffer {
        int used;
        u8 buf[L1_CACHE_BYTES];
};

/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
 * Advances piobuf pointer.  Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
                                    u8 *data, int len,
                                    struct efx_short_copy_buffer *copy_buf)
{
        int block_len = len & ~(sizeof(copy_buf->buf) - 1);

        __iowrite64_copy(*piobuf, data, block_len >> 3);
        *piobuf += block_len;
        len -= block_len;

        if (len) {
                data += block_len;
                BUG_ON(copy_buf->used);
                BUG_ON(len > sizeof(copy_buf->buf));
                memcpy(copy_buf->buf, data, len);
                copy_buf->used = len;
        }
}
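
/* Worked example (added commentary, assuming 64-byte cache lines): for
 * len == 150, block_len == 150 & ~63 == 128, so two full cache lines
 * (16 qwords) go out via __iowrite64_copy() and the remaining 22 bytes
 * are parked in copy_buf for the next call or the final flush.
 */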

/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
 * Advances piobuf pointer.  Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
                                       u8 *data, int len,
                                       struct efx_short_copy_buffer *copy_buf)
{
        if (copy_buf->used) {
                /* if the copy buffer is partially full, fill it up and write */
                int copy_to_buf =
                        min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);

                memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
                copy_buf->used += copy_to_buf;

                /* if we didn't fill it up then we're done for now */
                if (copy_buf->used < sizeof(copy_buf->buf))
                        return;

                __iowrite64_copy(*piobuf, copy_buf->buf,
                                 sizeof(copy_buf->buf) >> 3);
                *piobuf += sizeof(copy_buf->buf);
                data += copy_to_buf;
                len -= copy_to_buf;
                copy_buf->used = 0;
        }

        efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
}

static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
                                  struct efx_short_copy_buffer *copy_buf)
{
        /* if there's anything in it, write the whole buffer, including junk */
        if (copy_buf->used)
                __iowrite64_copy(piobuf, copy_buf->buf,
                                 sizeof(copy_buf->buf) >> 3);
}

/* Traverse skb structure and copy fragments into the PIO buffer.
 * Advances piobuf pointer.
 */
static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
                                     u8 __iomem **piobuf,
                                     struct efx_short_copy_buffer *copy_buf)
{
        int i;

        efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
                                copy_buf);

        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
                u8 *vaddr;

                vaddr = kmap_atomic(skb_frag_page(f));

                efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
                                           skb_frag_size(f), copy_buf);
                kunmap_atomic(vaddr);
        }

        EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list);
}

static struct efx_tx_buffer *
efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
        struct efx_tx_buffer *buffer =
                efx_tx_queue_get_insert_buffer(tx_queue);
        u8 __iomem *piobuf = tx_queue->piobuf;

        /* Copy to PIO buffer.  Ensure the writes are padded to the end
         * of a cache line, as this is required for write-combining to be
         * effective on at least x86.
         */

        if (skb_shinfo(skb)->nr_frags) {
                /* The size of the copy buffer will ensure all writes
                 * are the size of a cache line.
                 */
                struct efx_short_copy_buffer copy_buf;

                copy_buf.used = 0;

                efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
                                         &piobuf, &copy_buf);
                efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
        } else {
                /* Pad the write to the size of a cache line.
                 * We can do this because we know the skb_shared_info struct is
                 * after the source, and the destination buffer is big enough.
                 */
                BUILD_BUG_ON(L1_CACHE_BYTES >
                             SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
                __iowrite64_copy(tx_queue->piobuf, skb->data,
                                 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
        }

        EFX_POPULATE_QWORD_5(buffer->option,
                             ESF_DZ_TX_DESC_IS_OPT, 1,
                             ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
                             ESF_DZ_TX_PIO_CONT, 0,
                             ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
                             ESF_DZ_TX_PIO_BUF_ADDR,
                             tx_queue->piobuf_offset);
        ++tx_queue->pio_packets;
        ++tx_queue->insert_count;

        return buffer;
}
#endif /* EFX_USE_PIO */

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped,
 * and the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
        struct efx_nic *efx = tx_queue->efx;
        struct device *dma_dev = &efx->pci_dev->dev;
        struct efx_tx_buffer *buffer;
        unsigned int old_insert_count = tx_queue->insert_count;
        skb_frag_t *fragment;
        unsigned int len, unmap_len = 0;
        dma_addr_t dma_addr, unmap_addr = 0;
        unsigned int dma_len;
        unsigned short dma_flags;
        int i = 0;

        if (skb_shinfo(skb)->gso_size)
                return efx_enqueue_skb_tso(tx_queue, skb);

        /* Get size of the initial fragment */
        len = skb_headlen(skb);

        /* Pad if necessary */
        if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
                EFX_BUG_ON_PARANOID(skb->data_len);
                len = 32 + 1;
                if (skb_pad(skb, len - skb->len))
                        return NETDEV_TX_OK;
        }

        /* Consider using PIO for short packets */
#ifdef EFX_USE_PIO
        if (skb->len <= efx_piobuf_size && !skb->xmit_more &&
            efx_nic_may_tx_pio(tx_queue)) {
                buffer = efx_enqueue_skb_pio(tx_queue, skb);
                dma_flags = EFX_TX_BUF_OPTION;
                goto finish_packet;
        }
#endif

        /* Map for DMA.  Use dma_map_single rather than dma_map_page
         * since this is more efficient on machines with sparse
         * memory.
         */
        dma_flags = EFX_TX_BUF_MAP_SINGLE;
        dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);

        /* Process all fragments */
        while (1) {
                if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
                        goto dma_err;

                /* Store fields for marking in the per-fragment final
                 * descriptor
                 */
                unmap_len = len;
                unmap_addr = dma_addr;

                /* Add to TX queue, splitting across DMA boundaries */
                do {
                        buffer = efx_tx_queue_get_insert_buffer(tx_queue);

                        dma_len = efx_max_tx_len(efx, dma_addr);
                        if (likely(dma_len >= len))
                                dma_len = len;

                        /* Fill out per descriptor fields */
                        buffer->len = dma_len;
                        buffer->dma_addr = dma_addr;
                        buffer->flags = EFX_TX_BUF_CONT;
                        len -= dma_len;
                        dma_addr += dma_len;
                        ++tx_queue->insert_count;
                } while (len);

                /* Transfer ownership of the unmapping to the final buffer */
                buffer->flags = EFX_TX_BUF_CONT | dma_flags;
                buffer->unmap_len = unmap_len;
                buffer->dma_offset = buffer->dma_addr - unmap_addr;
                unmap_len = 0;

                /* Get address and size of next fragment */
                if (i >= skb_shinfo(skb)->nr_frags)
                        break;
                fragment = &skb_shinfo(skb)->frags[i];
                len = skb_frag_size(fragment);
                i++;
                /* Map for DMA */
                dma_flags = 0;
                dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
                                            DMA_TO_DEVICE);
        }

        /* Transfer ownership of the skb to the final buffer */
#ifdef EFX_USE_PIO
finish_packet:
#endif
        buffer->skb = skb;
        buffer->flags = EFX_TX_BUF_SKB | dma_flags;

        netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

        efx_tx_maybe_stop_queue(tx_queue);

        /* Pass off to hardware */
        if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
                struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

                /* There could be packets left on the partner queue if those
                 * SKBs had skb->xmit_more set.  If we do not push those they
                 * could be left for a long time and cause a netdev watchdog.
                 */
                if (txq2->xmit_more_available)
                        efx_nic_push_buffers(txq2);

                efx_nic_push_buffers(tx_queue);
        } else {
                tx_queue->xmit_more_available = skb->xmit_more;
        }

        tx_queue->tx_packets++;

        return NETDEV_TX_OK;

dma_err:
        netif_err(efx, tx_err, efx->net_dev,
                  "TX queue %d could not map skb with %d bytes %d fragments for DMA\n",
                  tx_queue->queue, skb->len, skb_shinfo(skb)->nr_frags + 1);

        /* Mark the packet as transmitted, and free the SKB ourselves */
        dev_kfree_skb_any(skb);

        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != old_insert_count) {
                unsigned int pkts_compl = 0, bytes_compl = 0;
                --tx_queue->insert_count;
                buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
        }

        /* Free the fragment we were mid-way through pushing */
        if (unmap_len) {
                if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr, unmap_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dma_dev, unmap_addr, unmap_len,
                                       DMA_TO_DEVICE);
        }

        return NETDEV_TX_OK;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
                                unsigned int index,
                                unsigned int *pkts_compl,
                                unsigned int *bytes_compl)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int stop_index, read_ptr;

        stop_index = (index + 1) & tx_queue->ptr_mask;
        read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

        while (read_ptr != stop_index) {
                struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

                if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
                    unlikely(buffer->len == 0)) {
                        netif_err(efx, tx_err, efx->net_dev,
                                  "TX queue %d spurious TX completion id %x\n",
                                  tx_queue->queue, read_ptr);
                        efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
                        return;
                }

                efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

                ++tx_queue->read_count;
                read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
        }
}
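
/* Added commentary (illustrative): with ptr_mask == 0x3ff, a completion
 * for index == 0x3ff gives stop_index == 0 and the loop walks naturally
 * through the wrap; read_count itself stays free-running and is only
 * masked when used as a ring index, matching the insert side.
 */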

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_tx_queue *tx_queue;
        unsigned index, type;

        EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

        /* PTP "event" packet */
        if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
            unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
                return efx_ptp_tx(efx, skb);
        }

        index = skb_get_queue_mapping(skb);
        type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
        if (index >= efx->n_tx_channels) {
                index -= efx->n_tx_channels;
                type |= EFX_TXQ_TYPE_HIGHPRI;
        }
        tx_queue = efx_get_tx_queue(efx, index, type);

        return efx_enqueue_skb(tx_queue, skb);
}
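
/* Worked example (added commentary, assuming n_tx_channels == 4): core
 * queue 5 carrying a CHECKSUM_PARTIAL skb maps to channel index 1 with
 * type EFX_TXQ_TYPE_OFFLOAD | EFX_TXQ_TYPE_HIGHPRI, since core queues
 * numbered n_tx_channels and up are the high-priority (TC 1) set.
 */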

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;

        /* Must be inverse of queue lookup in efx_hard_start_xmit() */
        tx_queue->core_txq =
                netdev_get_tx_queue(efx->net_dev,
                                    tx_queue->queue / EFX_TXQ_TYPES +
                                    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
                                     efx->n_tx_channels : 0));
}

int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
                 struct tc_to_netdev *ntc)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
        unsigned tc, num_tc;
        int rc;

        if (ntc->type != TC_SETUP_MQPRIO)
                return -EINVAL;

        num_tc = ntc->tc;

        if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
                return -EINVAL;

        if (num_tc == net_dev->num_tc)
                return 0;

        for (tc = 0; tc < num_tc; tc++) {
                net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
                net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
        }

        if (num_tc > net_dev->num_tc) {
                /* Initialise high-priority queues as necessary */
                efx_for_each_channel(channel, efx) {
                        efx_for_each_possible_channel_tx_queue(tx_queue,
                                                               channel) {
                                if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
                                        continue;
                                if (!tx_queue->buffer) {
                                        rc = efx_probe_tx_queue(tx_queue);
                                        if (rc)
                                                return rc;
                                }
                                if (!tx_queue->initialised)
                                        efx_init_tx_queue(tx_queue);
                                efx_init_tx_queue_core_txq(tx_queue);
                        }
                }
        } else {
                /* Reduce number of classes before number of queues */
                net_dev->num_tc = num_tc;
        }

        rc = netif_set_real_num_tx_queues(net_dev,
                                          max_t(int, num_tc, 1) *
                                          efx->n_tx_channels);
        if (rc)
                return rc;

        /* Do not destroy high-priority queues when they become
         * unused.  We would have to flush them first, and it is
         * fairly difficult to flush a subset of TX queues.  Leave
         * it to efx_fini_channels().
         */

        net_dev->num_tc = num_tc;
        return 0;
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
        unsigned fill_level;
        struct efx_nic *efx = tx_queue->efx;
        struct efx_tx_queue *txq2;
        unsigned int pkts_compl = 0, bytes_compl = 0;

        EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

        efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
        tx_queue->pkts_compl += pkts_compl;
        tx_queue->bytes_compl += bytes_compl;

        if (pkts_compl > 1)
                ++tx_queue->merge_events;

        /* See if we need to restart the netif queue.  This memory
         * barrier ensures that we write read_count (inside
         * efx_dequeue_buffers()) before reading the queue status.
         */
        smp_mb();
        if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
            likely(efx->port_enabled) &&
            likely(netif_device_present(efx->net_dev))) {
                txq2 = efx_tx_queue_partner(tx_queue);
                fill_level = max(tx_queue->insert_count - tx_queue->read_count,
                                 txq2->insert_count - txq2->read_count);
                if (fill_level <= efx->txq_wake_thresh)
                        netif_tx_wake_queue(tx_queue->core_txq);
        }

        /* Check whether the hardware queue is now empty */
        if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
                tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
                if (tx_queue->read_count == tx_queue->old_write_count) {
                        smp_mb();
                        tx_queue->empty_read_count =
                                tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
                }
        }
}

/* Size of page-based TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define TSOH_STD_SIZE   128
#define TSOH_PER_PAGE   (PAGE_SIZE / TSOH_STD_SIZE)

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an skb.
 */
static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
{
        return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
}
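
/* Worked example (added commentary, assuming 4K pages): TSOH_PER_PAGE ==
 * 32, and a 1024-entry ring holds at most 512 header descriptors, so
 * DIV_ROUND_UP(1024, 64) == 16 pages of header buffers suffice.
 */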

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
        EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
        tx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating TX queue %d size %#x mask %#x\n",
                  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

        /* Allocate software ring */
        tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
                                   GFP_KERNEL);
        if (!tx_queue->buffer)
                return -ENOMEM;

        if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
                tx_queue->tsoh_page =
                        kcalloc(efx_tsoh_page_count(tx_queue),
                                sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
                if (!tx_queue->tsoh_page) {
                        rc = -ENOMEM;
                        goto fail1;
                }
        }

        /* Allocate hardware ring */
        rc = efx_nic_probe_tx(tx_queue);
        if (rc)
                goto fail2;

        return 0;

fail2:
        kfree(tx_queue->tsoh_page);
        tx_queue->tsoh_page = NULL;
fail1:
        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
        return rc;
}
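
/* Sizing example (added commentary): a requested txq_entries of 700
 * rounds up to a 1024-entry ring and ptr_mask == 0x3ff; the power-of-two
 * size is what makes the "counter & ptr_mask" indexing used throughout
 * this file valid.
 */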

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "initialising TX queue %d\n", tx_queue->queue);

        tx_queue->insert_count = 0;
        tx_queue->write_count = 0;
        tx_queue->old_write_count = 0;
        tx_queue->read_count = 0;
        tx_queue->old_read_count = 0;
        tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
        tx_queue->xmit_more_available = false;

        /* Set up TX descriptor ring */
        efx_nic_init_tx(tx_queue);

        tx_queue->initialised = true;
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;

        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "shutting down TX queue %d\n", tx_queue->queue);

        if (!tx_queue->buffer)
                return;

        /* Free any buffers left in the ring */
        while (tx_queue->read_count != tx_queue->write_count) {
                unsigned int pkts_compl = 0, bytes_compl = 0;
                buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

                ++tx_queue->read_count;
        }
        tx_queue->xmit_more_available = false;
        netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
        int i;

        if (!tx_queue->buffer)
                return;

        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "destroying TX queue %d\n", tx_queue->queue);
        efx_nic_remove_tx(tx_queue);

        if (tx_queue->tsoh_page) {
                for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
                        efx_nic_free_buffer(tx_queue->efx,
                                            &tx_queue->tsoh_page[i]);
                kfree(tx_queue->tsoh_page);
                tx_queue->tsoh_page = NULL;
        }

        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
}

/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than the generic GSO path.
 *
 * Requires TX checksum offload support.
 */

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
 * @protocol: Network protocol (after any VLAN header)
 * @ip_off: Offset of IP header
 * @tcp_off: Offset of TCP header
 * @header_len: Number of bytes of header
 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
 * @header_dma_addr: Header DMA address, when using option descriptors
 * @header_unmap_len: Header DMA mapped length, or 0 if not using option
 *      descriptors
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
        /* Output position */
        unsigned out_len;
        unsigned seqnum;
        u16 ipv4_id;
        unsigned packet_space;

        /* Input position */
        dma_addr_t dma_addr;
        unsigned in_len;
        unsigned unmap_len;
        dma_addr_t unmap_addr;
        unsigned short dma_flags;

        __be16 protocol;
        unsigned int ip_off;
        unsigned int tcp_off;
        unsigned header_len;
        unsigned int ip_base_len;
        dma_addr_t header_dma_addr;
        unsigned int header_unmap_len;
};

/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
        __be16 protocol = skb->protocol;

        EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
                            protocol);
        if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
                protocol = veh->h_vlan_encapsulated_proto;
        }

        if (protocol == htons(ETH_P_IP)) {
                EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
        } else {
                EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
                EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
        }
        EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
                             + (tcp_hdr(skb)->doff << 2u)) >
                            skb_headlen(skb));

        return protocol;
}

static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
                               struct efx_tx_buffer *buffer, unsigned int len)
{
        u8 *result;

        EFX_BUG_ON_PARANOID(buffer->len);
        EFX_BUG_ON_PARANOID(buffer->flags);
        EFX_BUG_ON_PARANOID(buffer->unmap_len);

        if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) {
                unsigned index =
                        (tx_queue->insert_count & tx_queue->ptr_mask) / 2;
                struct efx_buffer *page_buf =
                        &tx_queue->tsoh_page[index / TSOH_PER_PAGE];
                unsigned offset =
                        TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN;

                if (unlikely(!page_buf->addr) &&
                    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
                                         GFP_ATOMIC))
                        return NULL;

                result = (u8 *)page_buf->addr + offset;
                buffer->dma_addr = page_buf->dma_addr + offset;
                buffer->flags = EFX_TX_BUF_CONT;
        } else {
                tx_queue->tso_long_headers++;

                buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC);
                if (unlikely(!buffer->heap_buf))
                        return NULL;
                result = (u8 *)buffer->heap_buf + NET_IP_ALIGN;
                buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
        }

        buffer->len = len;

        return result;
}
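
/* Worked example (added commentary): header and payload descriptors
 * alternate, so insert_count / 2 indexes the header slots.  With
 * TSOH_STD_SIZE == 128 and 4K pages, header slot 70 lands in page
 * 70 / 32 == 2 at offset 128 * (70 % 32) + NET_IP_ALIGN == 768 +
 * NET_IP_ALIGN.
 */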

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue:           Efx TX queue
 * @dma_addr:           DMA address of fragment
 * @len:                Length of fragment
 * @final_buffer:       The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.
 */
static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
                                dma_addr_t dma_addr, unsigned len,
                                struct efx_tx_buffer **final_buffer)
{
        struct efx_tx_buffer *buffer;
        struct efx_nic *efx = tx_queue->efx;
        unsigned dma_len;

        EFX_BUG_ON_PARANOID(len <= 0);

        while (1) {
                buffer = efx_tx_queue_get_insert_buffer(tx_queue);
                ++tx_queue->insert_count;

                EFX_BUG_ON_PARANOID(tx_queue->insert_count -
                                    tx_queue->read_count >=
                                    efx->txq_entries);

                buffer->dma_addr = dma_addr;

                dma_len = efx_max_tx_len(efx, dma_addr);

                /* If there is enough space to send then do so */
                if (dma_len >= len)
                        break;

                buffer->len = dma_len;
                buffer->flags = EFX_TX_BUF_CONT;
                dma_addr += dma_len;
                len -= dma_len;
        }

        EFX_BUG_ON_PARANOID(!len);
        buffer->len = len;
        *final_buffer = buffer;
}

/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
                              struct efx_tx_buffer *buffer, u8 *header)
{
        if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
                buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
                                                  header, buffer->len,
                                                  DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
                                               buffer->dma_addr))) {
                        kfree(buffer->heap_buf);
                        buffer->len = 0;
                        buffer->flags = 0;
                        return -ENOMEM;
                }
                buffer->unmap_len = buffer->len;
                buffer->dma_offset = 0;
                buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
        }

        ++tx_queue->insert_count;
        return 0;
}

/* Remove buffers put into a tx_queue.  None of the buffers must have
 * an skb attached.
 */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
                               unsigned int insert_count)
{
        struct efx_tx_buffer *buffer;

        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != insert_count) {
                --tx_queue->insert_count;
                buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
                efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
        }
}

/* Parse the SKB header and initialise state. */
static int tso_start(struct tso_state *st, struct efx_nic *efx,
                     struct efx_tx_queue *tx_queue,
                     const struct sk_buff *skb)
{
        struct device *dma_dev = &efx->pci_dev->dev;
        unsigned int header_len, in_len;
        bool use_opt_desc = false;
        dma_addr_t dma_addr;

        if (tx_queue->tso_version == 1)
                use_opt_desc = true;

        st->ip_off = skb_network_header(skb) - skb->data;
        st->tcp_off = skb_transport_header(skb) - skb->data;
        header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
        in_len = skb_headlen(skb) - header_len;
        st->header_len = header_len;
        st->in_len = in_len;
        if (st->protocol == htons(ETH_P_IP)) {
                st->ip_base_len = st->header_len - st->ip_off;
                st->ipv4_id = ntohs(ip_hdr(skb)->id);
        } else {
                st->ip_base_len = st->header_len - st->tcp_off;
                st->ipv4_id = 0;
        }
        st->seqnum = ntohl(tcp_hdr(skb)->seq);

        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

        st->out_len = skb->len - header_len;

        if (!use_opt_desc) {
                st->header_unmap_len = 0;

                if (likely(in_len == 0)) {
                        st->dma_flags = 0;
                        st->unmap_len = 0;
                        return 0;
                }

                dma_addr = dma_map_single(dma_dev, skb->data + header_len,
                                          in_len, DMA_TO_DEVICE);
                st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
                st->dma_addr = dma_addr;
                st->unmap_addr = dma_addr;
                st->unmap_len = in_len;
        } else {
                dma_addr = dma_map_single(dma_dev, skb->data,
                                          skb_headlen(skb), DMA_TO_DEVICE);
                st->header_dma_addr = dma_addr;
                st->header_unmap_len = skb_headlen(skb);
                st->dma_flags = 0;
                st->dma_addr = dma_addr + header_len;
                st->unmap_len = 0;
        }

        return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
}

static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
                            skb_frag_t *frag)
{
        st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
                                          skb_frag_size(frag), DMA_TO_DEVICE);
        if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
                st->dma_flags = 0;
                st->unmap_len = skb_frag_size(frag);
                st->in_len = skb_frag_size(frag);
                st->dma_addr = st->unmap_addr;
                return 0;
        }
        return -ENOMEM;
}

/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue:           Efx TX queue
 * @skb:                Socket buffer
 * @st:                 TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
                                          const struct sk_buff *skb,
                                          struct tso_state *st)
{
        struct efx_tx_buffer *buffer;
        int n;

        if (st->in_len == 0)
                return;
        if (st->packet_space == 0)
                return;

        EFX_BUG_ON_PARANOID(st->in_len <= 0);
        EFX_BUG_ON_PARANOID(st->packet_space <= 0);

        n = min(st->in_len, st->packet_space);

        st->packet_space -= n;
        st->out_len -= n;
        st->in_len -= n;

        efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);

        if (st->out_len == 0) {
                /* Transfer ownership of the skb */
                buffer->skb = skb;
                buffer->flags = EFX_TX_BUF_SKB;
        } else if (st->packet_space != 0) {
                buffer->flags = EFX_TX_BUF_CONT;
        }

        if (st->in_len == 0) {
                /* Transfer ownership of the DMA mapping */
                buffer->unmap_len = st->unmap_len;
                buffer->dma_offset = buffer->unmap_len - buffer->len;
                buffer->flags |= st->dma_flags;
                st->unmap_len = 0;
        }

        st->dma_addr += n;
}
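
/* Worked example (added commentary): for 4096 bytes of payload and
 * gso_size == 1448, successive calls carve segments of 1448, 1448 and
 * 1200 bytes; out_len reaching 0 hands the skb to the final buffer and
 * in_len reaching 0 hands over the DMA unmap responsibility.
 */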

/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue:           Efx TX queue
 * @skb:                Socket buffer
 * @st:                 TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -%ENOMEM if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
                                const struct sk_buff *skb,
                                struct tso_state *st)
{
        struct efx_tx_buffer *buffer =
                efx_tx_queue_get_insert_buffer(tx_queue);
        bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
        u8 tcp_flags_clear;

        if (!is_last) {
                st->packet_space = skb_shinfo(skb)->gso_size;
                tcp_flags_clear = 0x09; /* mask out FIN and PSH */
        } else {
                st->packet_space = st->out_len;
                tcp_flags_clear = 0x00;
        }
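
        /* Added commentary: byte 13 of the TCP header holds the flag bits;
         * FIN is 0x01 and PSH is 0x08, so clearing 0x09 on every segment
         * except the last keeps those flags only on the segment that ends
         * the original skb.
         */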

        if (!st->header_unmap_len) {
                /* Allocate and insert a DMA-mapped header buffer. */
                struct tcphdr *tsoh_th;
                unsigned ip_length;
                u8 *header;
                int rc;

                header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
                if (!header)
                        return -ENOMEM;

                tsoh_th = (struct tcphdr *)(header + st->tcp_off);

                /* Copy and update the headers. */
                memcpy(header, skb->data, st->header_len);

                tsoh_th->seq = htonl(st->seqnum);
                ((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;

                ip_length = st->ip_base_len + st->packet_space;

                if (st->protocol == htons(ETH_P_IP)) {
                        struct iphdr *tsoh_iph =
                                (struct iphdr *)(header + st->ip_off);

                        tsoh_iph->tot_len = htons(ip_length);
                        tsoh_iph->id = htons(st->ipv4_id);
                } else {
                        struct ipv6hdr *tsoh_iph =
                                (struct ipv6hdr *)(header + st->ip_off);

                        tsoh_iph->payload_len = htons(ip_length);
                }

                rc = efx_tso_put_header(tx_queue, buffer, header);
                if (unlikely(rc))
                        return rc;
        } else {
                /* Send the original headers with a TSO option descriptor
                 * in front
                 */
                u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;

                buffer->flags = EFX_TX_BUF_OPTION;
                buffer->len = 0;
                buffer->unmap_len = 0;
                EFX_POPULATE_QWORD_5(buffer->option,
                                     ESF_DZ_TX_DESC_IS_OPT, 1,
                                     ESF_DZ_TX_OPTION_TYPE,
                                     ESE_DZ_TX_OPTION_DESC_TSO,
                                     ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
                                     ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
                                     ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
                ++tx_queue->insert_count;

                /* We mapped the headers in tso_start().  Unmap them
                 * when the last segment is completed.
                 */
                buffer = efx_tx_queue_get_insert_buffer(tx_queue);
                buffer->dma_addr = st->header_dma_addr;
                buffer->len = st->header_len;
                if (is_last) {
                        buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
                        buffer->unmap_len = st->header_unmap_len;
                        buffer->dma_offset = 0;
                        /* Ensure we only unmap them once in case of a
                         * later DMA mapping error and rollback
                         */
                        st->header_unmap_len = 0;
                } else {
                        buffer->flags = EFX_TX_BUF_CONT;
                        buffer->unmap_len = 0;
                }
                ++tx_queue->insert_count;
        }

        st->seqnum += skb_shinfo(skb)->gso_size;

        /* Linux leaves suitable gaps in the IP ID space for us to fill. */
        ++st->ipv4_id;

        ++tx_queue->tso_packets;

        ++tx_queue->tx_packets;

        return 0;
}

/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue:           Efx TX queue
 * @skb:                Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO.  In all cases @skb is
 * consumed.  Returns %NETDEV_TX_OK.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int old_insert_count = tx_queue->insert_count;
        int frag_i, rc;
        struct tso_state state;

        /* Find the packet protocol and sanity-check it */
        state.protocol = efx_tso_check_protocol(skb);

        rc = tso_start(&state, efx, tx_queue, skb);
        if (rc)
                goto mem_err;

        if (likely(state.in_len == 0)) {
                /* Grab the first payload fragment. */
                EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
                frag_i = 0;
                rc = tso_get_fragment(&state, efx,
                                      skb_shinfo(skb)->frags + frag_i);
                if (rc)
                        goto mem_err;
        } else {
                /* Payload starts in the header area. */
                frag_i = -1;
        }

        if (tso_start_new_packet(tx_queue, skb, &state) < 0)
                goto mem_err;

        while (1) {
                tso_fill_packet_with_fragment(tx_queue, skb, &state);

                /* Move onto the next fragment? */
                if (state.in_len == 0) {
                        if (++frag_i >= skb_shinfo(skb)->nr_frags)
                                /* End of payload reached. */
                                break;
                        rc = tso_get_fragment(&state, efx,
                                              skb_shinfo(skb)->frags + frag_i);
                        if (rc)
                                goto mem_err;
                }

                /* Start at new packet? */
                if (state.packet_space == 0 &&
                    tso_start_new_packet(tx_queue, skb, &state) < 0)
                        goto mem_err;
        }

        netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

        efx_tx_maybe_stop_queue(tx_queue);

        /* Pass off to hardware */
        if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
                struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

                /* There could be packets left on the partner queue if those
                 * SKBs had skb->xmit_more set.  If we do not push those they
                 * could be left for a long time and cause a netdev watchdog.
                 */
                if (txq2->xmit_more_available)
                        efx_nic_push_buffers(txq2);

                efx_nic_push_buffers(tx_queue);
        } else {
                tx_queue->xmit_more_available = skb->xmit_more;
        }

        tx_queue->tso_bursts++;
        return NETDEV_TX_OK;

mem_err:
        netif_err(efx, tx_err, efx->net_dev,
                  "Out of memory for TSO headers, or DMA mapping error\n");
        dev_kfree_skb_any(skb);

        /* Free the DMA mapping we were in the process of writing out */
        if (state.unmap_len) {
                if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
                                         state.unmap_len, DMA_TO_DEVICE);
                else
                        dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
                                       state.unmap_len, DMA_TO_DEVICE);
        }

        /* Free the header DMA mapping, if using option descriptors */
        if (state.header_unmap_len)
                dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
                                 state.header_unmap_len, DMA_TO_DEVICE);

        efx_enqueue_unwind(tx_queue, old_insert_count);
        return NETDEV_TX_OK;
}