/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/
#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
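/* Note: build_ctob() above packs the descriptor type, command flags, offsets,
 * buffer size and L2 tag into the single 64-bit quadword the hardware reads
 * from each data descriptor; I40E_TXD_CMD combines EOP (end of packet) with
 * RS (report status) so completed descriptors are written back to the driver.
 */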
/**
 * i40e_fdir - Generate a Flow Director descriptor based on fdata
 * @tx_ring: Tx ring to send buffer on
 * @fdata: Flow director filter data
 * @add: Indicate if we are adding a rule or deleting one
 *
 **/
static void i40e_fdir(struct i40e_ring *tx_ring,
		      struct i40e_fdir_filter *fdata, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	/* Use LAN VSI Id if not programmed by user */
	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
		      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= add ?
		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
		     (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);

	dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
		     (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);

	if (fdata->cnt_index) {
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
			     ((u32)fdata->cnt_index <<
			      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
}
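/* The filter program descriptor above occupies a regular Tx ring slot;
 * i40e_program_fdir_filter() below pairs it with a dummy data descriptor
 * that points at a raw packet buffer from which the hardware extracts the
 * fields used to match the filter.
 */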
#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
				    u8 *raw_packet, struct i40e_pf *pf,
				    bool add)
{
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 delay = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = NULL;
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
			vsi = pf->vsi[i];
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	do {
		if (I40E_DESC_UNUSED(tx_ring) > 1)
			break;
		msleep_interruptible(1);
		delay++;
	} while (delay < I40E_FD_CLEAN_DELAY);

	if (!(I40E_DESC_UNUSED(tx_ring) > 1))
		return -EAGAIN;
150 dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;
155 /* grab the next descriptor */
156 i = tx_ring->next_to_use;
157 first = &tx_ring->tx_bi[i];
158 i40e_fdir(tx_ring, fdir_data, add);
160 /* Now program a dummy descriptor */
161 i = tx_ring->next_to_use;
162 tx_desc = I40E_TX_DESC(tx_ring, i);
163 tx_buf = &tx_ring->tx_bi[i];
165 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
167 memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
169 /* record length, and DMA address */
170 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
171 dma_unmap_addr_set(tx_buf, dma, dma);
173 tx_desc->buffer_addr = cpu_to_le64(dma);
174 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
176 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
177 tx_buf->raw_buf = (void *)raw_packet;
179 tx_desc->cmd_type_offset_bsz =
180 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}
#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	udp->source = fd_data->src_port;

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}
	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}
#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;

	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	tcp->source = fd_data->src_port;

	if (add) {
		pf->fd_tcp_rule++;
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
		pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
	} else {
		pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
				  (pf->fd_tcp_rule - 1) : 0;
		if (pf->fd_tcp_rule == 0) {
			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
			    I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}
/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	return -EOPNOTSUPP;
}
#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip[0];
		ip->daddr = fd_data->dst_ip[0];
		ip->protocol = 0;

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			err = true;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}
/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: flow director filter data describing the rule
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IPV4_FLOW:
		ret = i40e_add_del_fdir_ipv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		default:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

	/* The buffer allocated here is freed by the i40e_clean_tx_ring() */
	return ret;
}
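/* i40e_add_del_fdir() is the entry point used by the ethtool ntuple
 * (flow director sideband) path in i40e_ethtool.c to translate a user
 * flow spec into one of the protocol-specific helpers above.
 */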
469 * i40e_fd_handle_status - check the Programming Status for FD
470 * @rx_ring: the Rx ring for this descriptor
471 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
472 * @prog_id: the id originally used for programming
474 * This is used to verify if the FD programming or invalidation
475 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
487 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
488 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
490 if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
491 pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
492 if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
493 (I40E_DEBUG_FD & pf->hw.debug_mask))
494 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
497 /* Check if the programming error is for ATR.
498 * If so, auto disable ATR and set a state for
499 * flush in progress. Next time we come here if flush is in
500 * progress do nothing, once flush is complete the state will
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
510 if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
511 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
512 pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
			set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		}
516 /* filter programming failed most likely due to table full */
517 fcnt_prog = i40e_get_global_fd_count(pf);
518 fcnt_avail = pf->fdir_pf_filter_count;
519 /* If ATR is running fcnt_prog can quickly change,
520 * if we are very close to full, it makes sense to disable
521 * FD ATR/SB and then re-enable it when there is room.
523 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
524 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
525 !(pf->auto_disable_flags &
526 I40E_FLAG_FD_SB_ENABLED)) {
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
				pf->auto_disable_flags |=
							I40E_FLAG_FD_SB_ENABLED;
			}
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}
/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring:      the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
572 * i40e_clean_tx_ring - Free any empty Tx buffers
573 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
585 for (i = 0; i < tx_ring->count; i++)
586 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
588 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
589 memset(tx_ring->tx_bi, 0, bi_size);
591 /* Zero out the descriptor ring */
592 memset(tx_ring->desc, 0, tx_ring->size);
594 tx_ring->next_to_use = 0;
595 tx_ring->next_to_clean = 0;
	if (!tx_ring->netdev)
		return;
600 /* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
605 * i40e_free_tx_resources - Free Tx resources per queue
606 * @tx_ring: Tx descriptor ring for a specific queue
608 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
613 kfree(tx_ring->tx_bi);
614 tx_ring->tx_bi = NULL;
	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
624 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
626 * @in_sw: is tx_pending being checked in SW or HW
628 * Since there is no access to the ring head register
629 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
	u32 head, tail;

	if (!in_sw)
		head = i40e_get_head(ring);
	else
		head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

#define WB_STRIDE 0x3
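/* WB_STRIDE is used by i40e_clean_tx_irq(): when write-back on ITR is
 * enabled and no more than WB_STRIDE (i.e. fewer than four) descriptors are
 * still pending, the ring is marked to force a descriptor write-back.
 */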
651 * i40e_clean_tx_irq - Reclaim resources after transmit completes
652 * @vsi: the VSI we care about
653 * @tx_ring: Tx ring to clean
654 * @napi_budget: Used to determine if we are in netpoll
656 * Returns true if there's any budget left (e.g. the clean is finished)
658 static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
659 struct i40e_ring *tx_ring, int napi_budget)
661 u16 i = tx_ring->next_to_clean;
662 struct i40e_tx_buffer *tx_buf;
663 struct i40e_tx_desc *tx_head;
664 struct i40e_tx_desc *tx_desc;
665 unsigned int total_bytes = 0, total_packets = 0;
666 unsigned int budget = vsi->work_limit;
668 tx_buf = &tx_ring->tx_bi[i];
669 tx_desc = I40E_TX_DESC(tx_ring, i);
672 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
748 u64_stats_update_begin(&tx_ring->syncp);
749 tx_ring->stats.bytes += total_bytes;
750 tx_ring->stats.packets += total_packets;
751 u64_stats_update_end(&tx_ring->syncp);
752 tx_ring->q_vector->tx.total_bytes += total_bytes;
753 tx_ring->q_vector->tx.total_packets += total_packets;
	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		unsigned int j = i40e_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
		    !test_bit(__I40E_DOWN, &vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}
770 /* notify netdev of completed buffers */
771 netdev_tx_completed_queue(txring_txq(tx_ring),
772 total_packets, total_bytes);
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
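/* i40e_clean_tx_irq() returns true only when it cleaned less than its work
 * limit; i40e_napi_poll() uses that result to decide whether the Tx side of
 * the vector is done or NAPI polling should continue.
 */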
/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
		     val);
	} else {
		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
	q_vector->arm_wb_state = true;
}
/**
 * i40e_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}

/**
857 * i40e_set_new_dynamic_itr - Find new ITR level
858 * @rc: structure containing ring performance data
860 * Returns true if ITR changed, false if not
862 * Stores a new ITR value based on packets and byte counts during
863 * the last interrupt. The advantage of per interrupt computation
864 * is faster updates and more accurate ITR for the current traffic
865 * pattern. Constants in this function were computed based on
866 * theoretical maximum wire speed and thresholds were set based on
867 * testing data as well as attempting to minimize response time
868 * while increasing bulk throughput.
 **/
static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	struct i40e_q_vector *qv = rc->ring->q_vector;
	u32 new_itr = rc->itr;
	int bytes_per_int;
	int usecs;

	if (rc->total_packets == 0 || !rc->itr)
		return false;
881 /* simple throttlerate management
882 * 0-10MB/s lowest (50000 ints/s)
883 * 10-20MB/s low (20000 ints/s)
884 * 20-1249MB/s bulk (18000 ints/s)
885 * > 40000 Rx packets per second (8000 ints/s)
887 * The math works out because the divisor is in 10^(-6) which
888 * turns the bytes/us input value into MB/s values, but
889 * make sure to use usecs, as the register values written
890 * are in 2 usec increments in the ITR registers, and make sure
891 * to use the smoothed values that the countdown timer gives us.
893 usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
894 bytes_per_int = rc->total_bytes / usecs;
	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
	case I40E_ULTRA_LATENCY:
	default:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	}
915 /* this is to adjust RX more aggressively when streaming small
916 * packets. The value of 40000 was picked as it is just beyond
917 * what the hardware can receive per second if in low latency
920 #define RX_ULTRA_PACKET_RATE 40000
	if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
	    (&qv->rx == rc))
		new_latency_range = I40E_ULTRA_LATENCY;
926 rc->latency_range = new_latency_range;
	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_50K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_18K;
		break;
	case I40E_ULTRA_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	rc->total_bytes = 0;
	rc->total_packets = 0;

	if (new_itr != rc->itr) {
		rc->itr = new_itr;
		return true;
	}

	return false;
}
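/* Worked example (illustrative values, not taken from the hardware spec):
 * assuming ITR_COUNTDOWN_START is 100 and rc->itr holds I40E_ITR_20K
 * (25 in 2-usec units), the sampling window is usecs = (25 << 1) * 100 = 5000.
 * Moving 200 KB in that window gives bytes_per_int = 200000 / 5000 = 40 > 20,
 * so the ring is promoted from the low to the bulk latency range and new_itr
 * becomes I40E_ITR_18K.
 */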
/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 *
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc)
{
	u64 qw;
	u8 id;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
		  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
#ifdef I40E_FCOE
	else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
		 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
		i40e_fcoe_handle_status(rx_ring, rx_desc, id);
#endif
}
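/* Programming status descriptors are written back on the Rx ring rather than
 * the Tx ring that issued the filter; i40e_is_non_eop() spots them via
 * i40e_rx_is_programming_status() and routes them here instead of treating
 * them as received packets.
 */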
986 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
987 * @tx_ring: the tx ring to set up
989 * Return 0 on success, negative on error
991 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
993 struct device *dev = tx_ring->dev;
999 /* warn if we are about to overwrite the pointer */
1000 WARN_ON(tx_ring->tx_bi);
1001 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
1002 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
1003 if (!tx_ring->tx_bi)
1006 /* round up to nearest 4K */
1007 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1008 /* add u32 for head writeback, align after this takes care of
1009 * guaranteeing this is at least one cache line in size
1011 tx_ring->size += sizeof(u32);
1012 tx_ring->size = ALIGN(tx_ring->size, 4096);
1013 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1014 &tx_ring->dma, GFP_KERNEL);
1015 if (!tx_ring->desc) {
1016 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
1021 tx_ring->next_to_use = 0;
1022 tx_ring->next_to_clean = 0;
1026 kfree(tx_ring->tx_bi);
1027 tx_ring->tx_bi = NULL;
1032 * i40e_clean_rx_ring - Free Rx buffers
1033 * @rx_ring: ring to be cleaned
1035 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1037 struct device *dev = rx_ring->dev;
1038 unsigned long bi_size;
1041 /* ring already cleared, nothing to do */
1042 if (!rx_ring->rx_bi)
1045 /* Free all the Rx ring sk_buffs */
1046 for (i = 0; i < rx_ring->count; i++) {
1047 struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
1050 dev_kfree_skb(rx_bi->skb);
1056 dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
1057 __free_pages(rx_bi->page, 0);
1060 rx_bi->page_offset = 0;
1063 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1064 memset(rx_ring->rx_bi, 0, bi_size);
1066 /* Zero out the descriptor ring */
1067 memset(rx_ring->desc, 0, rx_ring->size);
1069 rx_ring->next_to_alloc = 0;
1070 rx_ring->next_to_clean = 0;
1071 rx_ring->next_to_use = 0;
1075 * i40e_free_rx_resources - Free Rx resources
1076 * @rx_ring: ring to clean the resources from
1078 * Free all receive software resources
1080 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1082 i40e_clean_rx_ring(rx_ring);
1083 kfree(rx_ring->rx_bi);
1084 rx_ring->rx_bi = NULL;
1086 if (rx_ring->desc) {
1087 dma_free_coherent(rx_ring->dev, rx_ring->size,
1088 rx_ring->desc, rx_ring->dma);
1089 rx_ring->desc = NULL;
1094 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1095 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1097 * Returns 0 on success, negative on failure
1099 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1101 struct device *dev = rx_ring->dev;
1104 /* warn if we are about to overwrite the pointer */
1105 WARN_ON(rx_ring->rx_bi);
1106 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1107 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1108 if (!rx_ring->rx_bi)
1111 u64_stats_init(&rx_ring->syncp);
1113 /* Round up to nearest 4K */
1114 rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1115 rx_ring->size = ALIGN(rx_ring->size, 4096);
1116 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1117 &rx_ring->dma, GFP_KERNEL);
1119 if (!rx_ring->desc) {
1120 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1125 rx_ring->next_to_alloc = 0;
1126 rx_ring->next_to_clean = 0;
1127 rx_ring->next_to_use = 0;
1131 kfree(rx_ring->rx_bi);
1132 rx_ring->rx_bi = NULL;
1137 * i40e_release_rx_desc - Store the new tail and head values
1138 * @rx_ring: ring to bump
1139 * @val: new head index
1141 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1143 rx_ring->next_to_use = val;
1145 /* update next to alloc since we have filled the ring */
1146 rx_ring->next_to_alloc = val;
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
/**
 * i40e_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
				   struct i40e_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, 0);
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}
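/* Each 4K page is treated as two I40E_RXBUFFER_2048 halves when
 * PAGE_SIZE < 8192: i40e_add_rx_frag() flips page_offset between the two
 * halves, so a half handed up the stack can coexist with the half still owned
 * by the ring, and a fresh page is only needed once the page can no longer be
 * recycled.
 */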
1204 * i40e_receive_skb - Send a completed packet up the stack
1205 * @rx_ring: rx ring in play
1206 * @skb: packet to send up
1207 * @vlan_tag: vlan tag for packet
1209 static void i40e_receive_skb(struct i40e_ring *rx_ring,
1210 struct sk_buff *skb, u16 vlan_tag)
1212 struct i40e_q_vector *q_vector = rx_ring->q_vector;
1214 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1215 (vlan_tag & VLAN_VID_MASK))
1216 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	napi_gro_receive(&q_vector->napi, skb);
}
/**
 * i40e_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/
bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_bi[ntu];

	do {
		if (!i40e_alloc_mapped_page(rx_ring, bi))
			goto no_buffers;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
		rx_desc->read.hdr_addr = 0;

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_bi;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return false;

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}

/**
1282 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1283 * @vsi: the VSI we care about
1284 * @skb: skb currently being received and modified
1285 * @rx_desc: the receive descriptor
1287 * skb->protocol must be set before this function is called
1289 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1290 struct sk_buff *skb,
				    union i40e_rx_desc *rx_desc)
{
	struct i40e_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;
	u8 ptype;
	u64 qword;

	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1300 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
1301 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1302 I40E_RXD_QW1_ERROR_SHIFT;
1303 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1304 I40E_RXD_QW1_STATUS_SHIFT;
1305 decoded = decode_rx_desc_ptype(ptype);
1307 skb->ip_summed = CHECKSUM_NONE;
1309 skb_checksum_none_assert(skb);
1311 /* Rx csum enabled and ip headers found? */
1312 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1315 /* did the hardware decode the packet and checksum? */
1316 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1319 /* both known and outer_ip must be set for the below code to work */
1320 if (!(decoded.known && decoded.outer_ip))
	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);

	if (ipv4 &&
	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;
	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
		return;
1350 /* If there is an outer header present that might contain a checksum
1351 * we need to bump the checksum level by 1 to reflect the fact that
1352 * we are indicating we validated the inner checksum.
1354 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1355 skb->csum_level = 1;
	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case I40E_RX_PTYPE_INNER_PROT_TCP:
	case I40E_RX_PTYPE_INNER_PROT_UDP:
	case I40E_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* fall through */
	default:
		break;
	}

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

/**
1375 * i40e_ptype_to_htype - get a hash type
1376 * @ptype: the ptype value from the descriptor
1378 * Returns a hash type to be used by skb_set_hash
 **/
static inline int i40e_ptype_to_htype(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;
1387 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1388 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1389 return PKT_HASH_TYPE_L4;
1390 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1391 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1392 return PKT_HASH_TYPE_L3;
	return PKT_HASH_TYPE_L2;
}

/**
 * i40e_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being populated
 * @rx_ptype: Rx packet type
 **/
static inline void i40e_rx_hash(struct i40e_ring *ring,
				union i40e_rx_desc *rx_desc,
				struct sk_buff *skb,
				u8 rx_ptype)
{
	u32 hash;
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
	}
}

/**
1422 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1423 * @rx_ring: rx descriptor ring packet is being transacted on
1424 * @rx_desc: pointer to the EOP Rx descriptor
1425 * @skb: pointer to current skb being populated
1426 * @rx_ptype: the packet type decoded by hardware
1428 * This function checks the ring, descriptor, and packet information in
1429 * order to populate the hash, checksum, VLAN, protocol, and
1430 * other fields within the skb.
 **/
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
			     union i40e_rx_desc *rx_desc, struct sk_buff *skb,
			     u8 rx_ptype)
{
	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1438 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1439 I40E_RXD_QW1_STATUS_SHIFT;
1440 u32 rsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1441 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
	if (unlikely(rsyn)) {
		i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, rsyn);
		rx_ring->last_rx_timestamp = jiffies;
	}

	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1450 /* modifies the skb - consumes the enet header */
1451 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1453 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
	skb_record_rx_queue(skb, rx_ring->queue_index);
}

/**
1459 * i40e_pull_tail - i40e specific version of skb_pull_tail
1460 * @rx_ring: rx descriptor ring packet is being transacted on
1461 * @skb: pointer to current skb being adjusted
1463 * This function is an i40e specific version of __pskb_pull_tail. The
1464 * main difference between this version and the original function is that
1465 * this function can make several assumptions about the state of things
1466 * that allow for significant optimizations versus the standard function.
1467 * As a result we can do things like drop a frag and maintain an accurate
1468 * truesize for the skb.
 **/
static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned char *va;
	unsigned int pull_len;
1476 /* it is valid to use page_address instead of kmap since we are
1477 * working with pages allocated out of the lomem pool per
1478 * alloc_page(GFP_ATOMIC)
1480 va = skb_frag_address(frag);
1482 /* we need the header to contain the greater of either ETH_HLEN or
1483 * 60 bytes if the skb->len is less than 60 for skb_pad.
1485 pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
1487 /* align pull length to size of long to optimize memcpy performance */
1488 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1490 /* update all of the pointers */
1491 skb_frag_size_sub(frag, pull_len);
1492 frag->page_offset += pull_len;
1493 skb->data_len -= pull_len;
	skb->tail += pull_len;
}

/**
1498 * i40e_cleanup_headers - Correct empty headers
1499 * @rx_ring: rx descriptor ring packet is being transacted on
1500 * @skb: pointer to current skb being fixed
1502 * Also address the case where we are pulling data in on pages only
1503 * and as such no data is present in the skb header.
1505 * In addition if skb is not at least 60 bytes we need to pad it so that
1506 * it is large enough to qualify as a valid Ethernet frame.
1508 * Returns true if an error was encountered and skb was freed.
 **/
static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
{
1512 /* place header in linear portion of buffer */
1513 if (skb_is_nonlinear(skb))
1514 i40e_pull_tail(rx_ring, skb);
1516 /* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
1524 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1525 * @rx_ring: rx descriptor ring to store buffers on
1526 * @old_buff: donor buffer to have page reused
1528 * Synchronizes page for reuse by the adapter
 **/
static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
			       struct i40e_rx_buffer *old_buff)
{
1533 struct i40e_rx_buffer *new_buff;
1534 u16 nta = rx_ring->next_to_alloc;
1536 new_buff = &rx_ring->rx_bi[nta];
	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	*new_buff = *old_buff;
}

/**
1547 * i40e_page_is_reserved - check if reuse is possible
1548 * @page: page struct to check
 **/
static inline bool i40e_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

/**
1556 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
1557 * @rx_ring: rx descriptor ring to transact packets on
1558 * @rx_buffer: buffer containing page to add
1559 * @rx_desc: descriptor containing length of buffer written by hardware
1560 * @skb: sk_buff to place the data into
1562 * This function will add the data contained in rx_buffer->page to the skb.
1563 * This is done either through a direct copy if the data in the buffer is
1564 * less than the skb header size, otherwise it will just attach the page as
1565 * a frag to the skb.
1567 * The function will then update the page offset if necessary and return
1568 * true if the buffer can be reused by the adapter.
 **/
static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
			     struct i40e_rx_buffer *rx_buffer,
			     union i40e_rx_desc *rx_desc,
			     struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
			    I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = I40E_RXBUFFER_2048;
#else
	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
	unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
#endif

	/* will the data fit in the skb we allocated? if so, just
	 * copy it as it is pretty small anyway
	 */
	if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
		unsigned char *va = page_address(page) + rx_buffer->page_offset;

		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as-is */
		if (likely(!i40e_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		__free_pages(page, 0);
		return false;
	}

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			rx_buffer->page_offset, size, truesize);

	/* avoid re-using remote pages */
	if (unlikely(i40e_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= truesize;
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > last_offset)
		return false;
#endif

	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	get_page(rx_buffer->page);

	return true;
}

/**
1634 * i40e_fetch_rx_buffer - Allocate skb and populate it
1635 * @rx_ring: rx descriptor ring to transact packets on
1636 * @rx_desc: descriptor containing info written by hardware
1638 * This function allocates an skb on the fly, and populates it with the page
1639 * data from the current receive descriptor, taking care to set up the skb
1640 * correctly, as well as handling calling the page recycle function if
1644 struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
1645 union i40e_rx_desc *rx_desc)
1647 struct i40e_rx_buffer *rx_buffer;
	struct sk_buff *skb;
	struct page *page;

	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	skb = rx_buffer->skb;

	if (likely(!skb)) {
		void *page_addr = page_address(page) + rx_buffer->page_offset;
1660 /* prefetch first cache line of first page */
1661 prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
				       I40E_RX_HDR_SIZE,
				       GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_buff_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
1676 * pskb_may_pull so it is in our interest to prefetch
1677 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	} else {
		rx_buffer->skb = NULL;
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      I40E_RXBUFFER_2048,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
1692 if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
1693 /* hand second half of page back to the ring */
1694 i40e_reuse_rx_page(rx_ring, rx_buffer);
1695 rx_ring->rx_stats.page_reuse_count++;
1697 /* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
	}

	/* clear contents of buffer_info */
	rx_buffer->page = NULL;

	return skb;
}

/**
1709 * i40e_is_non_eop - process handling of non-EOP buffers
1710 * @rx_ring: Rx ring being processed
1711 * @rx_desc: Rx descriptor for current buffer
1712 * @skb: Current socket buffer containing buffer in progress
1714 * This function updates next to clean. If the buffer is an EOP buffer
1715 * this function exits returning false, otherwise it will place the
1716 * sk_buff in the next buffer to be chained and return true indicating
1717 * that this is in fact a non-EOP buffer.
1719 static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
1720 union i40e_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;
1725 /* fetch, update, and store next to clean */
1726 ntc = (ntc < rx_ring->count) ? ntc : 0;
1727 rx_ring->next_to_clean = ntc;
1729 prefetch(I40E_RX_DESC(rx_ring, ntc));
1731 #define staterrlen rx_desc->wb.qword1.status_error_len
1732 if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) {
1733 i40e_clean_programming_status(rx_ring, rx_desc);
		rx_ring->rx_bi[ntc].skb = skb;
		return true;
	}

	/* if we are the last buffer then there is nothing else to do */
#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
	if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_bi[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
1750 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1751 * @rx_ring: rx descriptor ring to transact packets on
1752 * @budget: Total limit on number of packets to process
1754 * This function provides a "bounce buffer" approach to Rx interrupt
1755 * processing. The advantage to this is that on systems that have
1756 * expensive overhead for IOMMU access this provides a means of avoiding
1757 * it by maintaining the mapping of the page to the system.
1759 * Returns amount of work completed
1761 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1763 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1764 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1765 bool failure = false;
1767 while (likely(total_rx_packets < budget)) {
		union i40e_rx_desc *rx_desc;
		struct sk_buff *skb;
		u32 rx_status;
		u16 vlan_tag;
		u8 rx_ptype;
		u64 qword;
1775 /* return some buffers to hardware, one at a time is too slow */
1776 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			failure = failure ||
				  i40e_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
1784 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1785 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1786 I40E_RXD_QW1_PTYPE_SHIFT;
1787 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1788 I40E_RXD_QW1_STATUS_SHIFT;
		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
			break;

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then it will be non-zero
		 */
		if (!rx_desc->wb.qword1.status_error_len)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		skb = i40e_fetch_rx_buffer(rx_ring, rx_desc);
		if (!skb)
			break;

		cleaned_count++;

		if (i40e_is_non_eop(rx_ring, rx_desc, skb))
			continue;
1816 /* ERR_MASK will only have valid bits if EOP set, and
1817 * what we are doing here is actually checking
1818 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
		if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		if (i40e_cleanup_headers(rx_ring, skb))
			continue;
1830 /* probably a little skewed due to removing CRC */
1831 total_rx_bytes += skb->len;
1833 /* populate checksum, VLAN, and protocol */
1834 i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
#ifdef I40E_FCOE
		if (unlikely(
		    i40e_rx_is_fcoe(rx_ptype) &&
		    !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
			dev_kfree_skb_any(skb);
			continue;
		}
#endif
1845 vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
1846 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
1848 i40e_receive_skb(rx_ring, skb, vlan_tag);
		/* update budget accounting */
		total_rx_packets++;
	}
1854 u64_stats_update_begin(&rx_ring->syncp);
1855 rx_ring->stats.packets += total_rx_packets;
1856 rx_ring->stats.bytes += total_rx_bytes;
1857 u64_stats_update_end(&rx_ring->syncp);
1858 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1859 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1861 /* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : total_rx_packets;
}

static u32 i40e_buildreg_itr(const int type, const u16 itr)
{
	u32 val;

	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	      /* Don't clear PBA because that can cause lost interrupts that
	       * came in while we were cleaning/polling
	       */
	      (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
	      (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);

	return val;
}
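/* The value built above both re-enables the interrupt (INTENA) and selects
 * which ITR register the interval field applies to; passing I40E_ITR_NONE as
 * the type leaves the hardware interrupt throttle interval unchanged.
 */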
1879 /* a small macro to shorten up some long lines */
1880 #define INTREG I40E_PFINT_DYN_CTLN
static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx)
{
	return !!(vsi->rx_rings[idx]->rx_itr_setting);
}

static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx)
{
	return !!(vsi->tx_rings[idx]->tx_itr_setting);
}

/**
1892 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
1893 * @vsi: the VSI we care about
1894 * @q_vector: q_vector for which itr is being updated and interrupt enabled
 **/
static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
					  struct i40e_q_vector *q_vector)
{
	struct i40e_hw *hw = &vsi->back->hw;
	bool rx = false, tx = false;
	u32 rxval, txval;
	int vector;
	int idx = q_vector->v_idx;
	int rx_itr_setting, tx_itr_setting;
1907 vector = (q_vector->v_idx + vsi->base_vector);
1909 /* avoid dynamic calculation if in countdown mode OR if
	 * all dynamic is disabled
	 */
1912 rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
1914 rx_itr_setting = get_rx_itr_enabled(vsi, idx);
1915 tx_itr_setting = get_tx_itr_enabled(vsi, idx);
	if (q_vector->itr_countdown > 0 ||
	    (!ITR_IS_DYNAMIC(rx_itr_setting) &&
	     !ITR_IS_DYNAMIC(tx_itr_setting))) {
		goto enable_int;
	}

	if (ITR_IS_DYNAMIC(rx_itr_setting)) {
		rx = i40e_set_new_dynamic_itr(&q_vector->rx);
		rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
	}

	if (ITR_IS_DYNAMIC(tx_itr_setting)) {
		tx = i40e_set_new_dynamic_itr(&q_vector->tx);
		txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
	}

	if (rx || tx) {
		/* get the higher of the two ITR adjustments and
		 * use the same value for both ITR registers
		 * when in adaptive mode (Rx and/or Tx)
		 */
		u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);

		q_vector->tx.itr = q_vector->rx.itr = itr;
		txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
		tx = true;
		rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
		rx = true;
	}
	/* only need to enable the interrupt once, but need
	 * to possibly update both ITR values
	 */
	if (rx) {
		/* set the INTENA_MSK_MASK so that this first write
		 * won't actually enable the interrupt, instead just
		 * updating the ITR (it's bit 31 PF and VF)
		 */
		rxval |= BIT(31);
		/* don't check _DOWN because interrupt isn't being enabled */
		wr32(hw, INTREG(vector - 1), rxval);
	}

enable_int:
1961 if (!test_bit(__I40E_DOWN, &vsi->state))
1962 wr32(hw, INTREG(vector - 1), txval);
1964 if (q_vector->itr_countdown)
		q_vector->itr_countdown--;
	else
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
}

/**
1971 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
1972 * @napi: napi struct with our devices info in it
1973 * @budget: amount of work driver is allowed to do this pass, in packets
1975 * This function will clean all queues associated with a q_vector.
1977 * Returns the amount of work done
1979 int i40e_napi_poll(struct napi_struct *napi, int budget)
1981 struct i40e_q_vector *q_vector =
1982 container_of(napi, struct i40e_q_vector, napi);
1983 struct i40e_vsi *vsi = q_vector->vsi;
1984 struct i40e_ring *ring;
1985 bool clean_complete = true;
1986 bool arm_wb = false;
1987 int budget_per_ring;
	int work_done = 0;

	if (test_bit(__I40E_DOWN, &vsi->state)) {
		napi_complete(napi);
		return 0;
	}

	/* Clear hung_detected bit */
1996 clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected);
1997 /* Since the actual Tx work is minimal, we can give the Tx a larger
1998 * budget and be more aggressive about cleaning up the Tx descriptors.
	i40e_for_each_ring(ring, q_vector->tx) {
		if (!i40e_clean_tx_irq(vsi, ring, budget)) {
			clean_complete = false;
			continue;
		}
		arm_wb |= ring->arm_wb;
		ring->arm_wb = false;
	}
	/* Handle case where we are called by netpoll with a budget of 0 */
	if (budget <= 0)
		goto tx_only;
2013 /* We attempt to distribute budget to each Rx queue fairly, but don't
2014 * allow the budget to go below 1 because that would exit polling early.
2016 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
2018 i40e_for_each_ring(ring, q_vector->rx) {
2019 int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
2021 work_done += cleaned;
2022 /* if we clean as many as budgeted, we must not be done */
2023 if (cleaned >= budget_per_ring)
			clean_complete = false;
	}
2027 /* If work not completed, return budget and polling will return */
	if (!clean_complete) {
tx_only:
		if (arm_wb) {
			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
			i40e_enable_wb_on_itr(vsi, q_vector);
		}
		return budget;
	}
2037 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2038 q_vector->arm_wb_state = false;
2040 /* Work is done so exit the polling mode and re-enable the interrupt */
2041 napi_complete_done(napi, work_done);
2042 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
2043 i40e_update_enable_itr(vsi, q_vector);
2044 } else { /* Legacy mode */
		i40e_irq_dynamic_enable_icr0(vsi->back, false);
	}
	return 0;
}
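/* i40e_napi_poll() is the NAPI handler registered for each q_vector (via
 * netif_napi_add() in i40e_main.c); it fans the budget out across all Tx and
 * Rx rings attached to the vector.
 */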
/**
 * i40e_atr - Add a Flow Director ATR filter
 * @tx_ring: ring to add programming descriptor to
 * @skb: send buffer
 * @tx_flags: send tx flags
 **/
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	struct tcphdr *th;
	unsigned int hlen;
	u32 flex_ptype, dtype_cmd;
	int l4_proto;
	u16 i;
2072 /* make sure ATR is enabled */
2073 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
2076 if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
2079 /* if sampling is disabled do nothing */
2080 if (!tx_ring->atr_sample_rate)
2083 /* Currently only IPv4/IPv6 with TCP is supported */
2084 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2087 /* snag network header to get L4 type and address */
2088 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2089 skb_inner_network_header(skb) : skb_network_header(skb);
2091 /* Note: tx_flags gets modified to reflect inner protocols in
2092 * tx_enable_csum function if encap is enabled.
	if (tx_flags & I40E_TX_FLAGS_IPV4) {
		/* access ihl as u8 to avoid unaligned access on ia64 */
		hlen = (hdr.network[0] & 0x0F) << 2;
		l4_proto = hdr.ipv4->protocol;
	} else {
		hlen = hdr.network - skb->data;
		l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
		hlen -= hdr.network - skb->data;
	}
2104 if (l4_proto != IPPROTO_TCP)
2105 return;
2107 th = (struct tcphdr *)(hdr.network + hlen);
2109 /* Due to lack of space, no more new filters can be programmed */
2110 if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
2111 return;
2112 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
2113 (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) {
2114 /* HW ATR eviction will take care of removing filters on FIN
2115 * and RST packets.
2116 */
2117 if (th->fin || th->rst)
2118 return;
2119 }
2121 tx_ring->atr_count++;
2123 /* sample on all syn/fin/rst packets or once every atr sample rate */
2124 if (!th->fin &&
2125 !th->syn &&
2126 !th->rst &&
2127 (tx_ring->atr_count < tx_ring->atr_sample_rate))
2128 return;
2130 tx_ring->atr_count = 0;
2132 /* grab the next descriptor */
2133 i = tx_ring->next_to_use;
2134 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2136 i++;
2137 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2139 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2140 I40E_TXD_FLTR_QW0_QINDEX_MASK;
2141 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2142 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2143 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2144 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2145 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2147 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2149 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2151 dtype_cmd |= (th->fin || th->rst) ?
2152 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2153 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2154 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2155 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2157 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2158 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2160 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2161 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2163 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2164 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
2165 dtype_cmd |=
2166 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2167 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2168 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2169 else
2170 dtype_cmd |=
2171 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2172 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2173 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2175 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
2176 (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)))
2177 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2179 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2180 fdir_desc->rsvd = cpu_to_le32(0);
2181 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2182 fdir_desc->fd_id = cpu_to_le32(0);
2183 }
2185 /**
2186 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2187 * @skb: send buffer
2188 * @tx_ring: ring to send buffer on
2189 * @flags: the tx flags to be set
2191 * Checks the skb and sets up the corresponding generic transmit flags
2192 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2194 * Returns an error code to indicate the frame should be dropped upon error,
2195 * otherwise returns 0 to indicate the flags have been set properly.
2196 **/
2197 #ifdef I40E_FCOE
2198 inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2199 struct i40e_ring *tx_ring,
2200 u32 *flags)
2201 #else
2202 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2203 struct i40e_ring *tx_ring,
2204 u32 *flags)
2205 #endif
2206 {
2207 __be16 protocol = skb->protocol;
2208 u32 tx_flags = 0;
2210 if (protocol == htons(ETH_P_8021Q) &&
2211 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2212 /* When HW VLAN acceleration is turned off by the user the
2213 * stack sets the protocol to 8021q so that the driver
2214 * can take any steps required to support the SW only
2215 * VLAN handling. In our case the driver doesn't need
2216 * to take any further steps so just set the protocol
2217 * to the encapsulated ethertype.
2218 */
2219 skb->protocol = vlan_get_protocol(skb);
2220 goto out;
2221 }
2223 /* if we have a HW VLAN tag being added, default to the HW one */
2224 if (skb_vlan_tag_present(skb)) {
2225 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2226 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2227 /* else if it is a SW VLAN, check the next protocol and store the tag */
2228 } else if (protocol == htons(ETH_P_8021Q)) {
2229 struct vlan_hdr *vhdr, _vhdr;
2231 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2232 if (unlikely(!vhdr))
2233 return -EINVAL;
2235 protocol = vhdr->h_vlan_encapsulated_proto;
2236 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2237 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2240 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2241 goto out;
2243 /* Insert 802.1p priority into VLAN header */
2244 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2245 (skb->priority != TC_PRIO_CONTROL)) {
2246 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2247 tx_flags |= (skb->priority & 0x7) <<
2248 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2249 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2250 struct vlan_ethhdr *vhdr;
2251 int rc;
2253 rc = skb_cow_head(skb, 0);
2254 if (rc < 0)
2255 return rc;
2256 vhdr = (struct vlan_ethhdr *)skb->data;
2257 vhdr->h_vlan_TCI = htons(tx_flags >>
2258 I40E_TX_FLAGS_VLAN_SHIFT);
2259 } else {
2260 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2261 }
2262 }
2264 out:
2265 *flags = tx_flags;
2266 return 0;
2267 }
2269 /**
2270 * i40e_tso - set up the tso context descriptor
2271 * @skb: ptr to the skb we're sending
2272 * @hdr_len: ptr to the size of the packet header
2273 * @cd_type_cmd_tso_mss: Quad Word 1
2275 * Returns 0 if no TSO can happen, 1 if tso is going, or error
2276 **/
2277 static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
2278 {
2279 u64 cd_cmd, cd_tso_len, cd_mss;
2280 union {
2281 struct iphdr *v4;
2282 struct ipv6hdr *v6;
2283 unsigned char *hdr;
2284 } ip;
2285 union {
2286 struct tcphdr *tcp;
2287 struct udphdr *udp;
2288 unsigned char *hdr;
2289 } l4;
2290 u32 paylen, l4_offset;
2291 int err;
2293 if (skb->ip_summed != CHECKSUM_PARTIAL)
2294 return 0;
2296 if (!skb_is_gso(skb))
2297 return 0;
2299 err = skb_cow_head(skb, 0);
2300 if (err < 0)
2301 return err;
2303 ip.hdr = skb_network_header(skb);
2304 l4.hdr = skb_transport_header(skb);
2306 /* initialize outer IP header fields */
2307 if (ip.v4->version == 4) {
2308 ip.v4->tot_len = 0;
2309 ip.v4->check = 0;
2310 } else {
2311 ip.v6->payload_len = 0;
2312 }
2314 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2315 SKB_GSO_GRE_CSUM |
2316 SKB_GSO_IPXIP4 |
2317 SKB_GSO_IPXIP6 |
2318 SKB_GSO_UDP_TUNNEL |
2319 SKB_GSO_UDP_TUNNEL_CSUM)) {
2320 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2321 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2322 l4.udp->len = 0;
2324 /* determine offset of outer transport header */
2325 l4_offset = l4.hdr - skb->data;
2327 /* remove payload length from outer checksum */
2328 paylen = skb->len - l4_offset;
2329 csum_replace_by_diff(&l4.udp->check, htonl(paylen));
2330 }
2332 /* reset pointers to inner headers */
2333 ip.hdr = skb_inner_network_header(skb);
2334 l4.hdr = skb_inner_transport_header(skb);
2336 /* initialize inner IP header fields */
2337 if (ip.v4->version == 4) {
2338 ip.v4->tot_len = 0;
2339 ip.v4->check = 0;
2340 } else {
2341 ip.v6->payload_len = 0;
2342 }
2343 }
2345 /* determine offset of inner transport header */
2346 l4_offset = l4.hdr - skb->data;
2348 /* remove payload length from inner checksum */
2349 paylen = skb->len - l4_offset;
2350 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
2352 /* compute length of segmentation header */
2353 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
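/* tcp->doff counts 32-bit words, so doff * 4 is the TCP header length
 * in bytes; adding l4_offset gives the full L2+L3(+tunnel)+L4 header
 * that precedes the TSO payload.
 */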
2355 /* find the field values */
2356 cd_cmd = I40E_TX_CTX_DESC_TSO;
2357 cd_tso_len = skb->len - *hdr_len;
2358 cd_mss = skb_shinfo(skb)->gso_size;
2359 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2360 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2361 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
2362 return 1;
2363 }
2365 /**
2366 * i40e_tsyn - set up the tsyn context descriptor
2367 * @tx_ring: ptr to the ring to send
2368 * @skb: ptr to the skb we're sending
2369 * @tx_flags: the collected send information
2370 * @cd_type_cmd_tso_mss: Quad Word 1
2372 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2374 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2375 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2376 {
2377 struct i40e_pf *pf;
2379 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2380 return 0;
2382 /* Tx timestamps cannot be sampled when doing TSO */
2383 if (tx_flags & I40E_TX_FLAGS_TSO)
2384 return 0;
2386 /* only timestamp the outbound packet if the user has requested it and
2387 * we are not already transmitting a packet to be timestamped
2389 pf = i40e_netdev_to_pf(tx_ring->netdev);
2390 if (!(pf->flags & I40E_FLAG_PTP))
2391 return 0;
2393 if (pf->ptp_tx &&
2394 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
2395 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2396 pf->ptp_tx_skb = skb_get(skb);
2397 } else {
2398 return 0;
2399 }
2401 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
2402 I40E_TXD_CTX_QW1_CMD_SHIFT;
2404 return 1;
2405 }
2407 /**
2408 * i40e_tx_enable_csum - Enable Tx checksum offloads
2409 * @skb: send buffer
2410 * @tx_flags: pointer to Tx flags currently set
2411 * @td_cmd: Tx descriptor command bits to set
2412 * @td_offset: Tx descriptor header offsets to set
2413 * @tx_ring: Tx descriptor ring
2414 * @cd_tunneling: ptr to context desc bits
2415 **/
2416 static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
2417 u32 *td_cmd, u32 *td_offset,
2418 struct i40e_ring *tx_ring,
2419 u32 *cd_tunneling)
2420 {
2421 union {
2422 struct iphdr *v4;
2423 struct ipv6hdr *v6;
2424 unsigned char *hdr;
2425 } ip;
2426 union {
2427 struct tcphdr *tcp;
2428 struct udphdr *udp;
2429 unsigned char *hdr;
2430 } l4;
2431 unsigned char *exthdr;
2432 u32 offset, cmd = 0;
2433 __be16 frag_off;
2434 u8 l4_proto = 0;
2436 if (skb->ip_summed != CHECKSUM_PARTIAL)
2437 return 0;
2439 ip.hdr = skb_network_header(skb);
2440 l4.hdr = skb_transport_header(skb);
2442 /* compute outer L2 header size */
2443 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
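/* the MACLEN field is programmed in 2-byte words, hence the divide by 2 */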
2445 if (skb->encapsulation) {
2446 u32 tunnel = 0;
2447 /* define outer network header type */
2448 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2449 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2450 I40E_TX_CTX_EXT_IP_IPV4 :
2451 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
2453 l4_proto = ip.v4->protocol;
2454 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2455 int ret;
2457 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
2459 exthdr = ip.hdr + sizeof(*ip.v6);
2460 l4_proto = ip.v6->nexthdr;
2461 ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
2462 &l4_proto, &frag_off);
2463 if (ret < 0)
2464 return -1;
2465 }
2467 /* define outer transport */
2468 switch (l4_proto) {
2469 case IPPROTO_UDP:
2470 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
2471 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2472 break;
2473 case IPPROTO_GRE:
2474 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
2475 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2476 break;
2477 case IPPROTO_IPIP:
2478 case IPPROTO_IPV6:
2479 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2480 l4.hdr = skb_inner_network_header(skb);
2481 break;
2482 default:
2483 if (*tx_flags & I40E_TX_FLAGS_TSO)
2484 return -1;
2486 skb_checksum_help(skb);
2487 return 0;
2488 }
2490 /* compute outer L3 header size */
2491 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
2492 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
2494 /* switch IP header pointer from outer to inner header */
2495 ip.hdr = skb_inner_network_header(skb);
2497 /* compute tunnel header size */
2498 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
2499 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
2501 /* indicate if we need to offload outer UDP header */
2502 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
2503 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2504 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
2505 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
2507 /* record tunnel offload values */
2508 *cd_tunneling |= tunnel;
2510 /* switch L4 header pointer from outer to inner */
2511 l4.hdr = skb_inner_transport_header(skb);
2512 l4_proto = 0;
2514 /* reset type as we transition from outer to inner headers */
2515 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
2516 if (ip.v4->version == 4)
2517 *tx_flags |= I40E_TX_FLAGS_IPV4;
2518 if (ip.v6->version == 6)
2519 *tx_flags |= I40E_TX_FLAGS_IPV6;
2520 }
2522 /* Enable IP checksum offloads */
2523 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2524 l4_proto = ip.v4->protocol;
2525 /* the stack computes the IP header already, the only time we
2526 * need the hardware to recompute it is in the case of TSO.
2527 */
2528 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2529 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
2530 I40E_TX_DESC_CMD_IIPT_IPV4;
2531 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2532 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
2534 exthdr = ip.hdr + sizeof(*ip.v6);
2535 l4_proto = ip.v6->nexthdr;
2536 if (l4.hdr != exthdr)
2537 ipv6_skip_exthdr(skb, exthdr - skb->data,
2538 &l4_proto, &frag_off);
2539 }
2541 /* compute inner L3 header size */
2542 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
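/* IPLEN, like the L4 length fields below, is programmed in 4-byte
 * dwords, hence the divide by 4 above.
 */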
2544 /* Enable L4 checksum offloads */
2545 switch (l4_proto) {
2546 case IPPROTO_TCP:
2547 /* enable checksum offloads */
2548 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
2549 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2550 break;
2551 case IPPROTO_SCTP:
2552 /* enable SCTP checksum offload */
2553 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
2554 offset |= (sizeof(struct sctphdr) >> 2) <<
2555 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2556 break;
2557 case IPPROTO_UDP:
2558 /* enable UDP checksum offload */
2559 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
2560 offset |= (sizeof(struct udphdr) >> 2) <<
2561 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2562 break;
2563 default:
2564 if (*tx_flags & I40E_TX_FLAGS_TSO)
2565 return -1;
2566 skb_checksum_help(skb);
2567 return 0;
2568 }
2570 *td_cmd |= cmd;
2571 *td_offset |= offset;
2573 return 1;
2574 }
2576 /**
2577 * i40e_create_tx_ctx - Build the Tx context descriptor
2578 * @tx_ring: ring to create the descriptor on
2579 * @cd_type_cmd_tso_mss: Quad Word 1
2580 * @cd_tunneling: Quad Word 0 - bits 0-31
2581 * @cd_l2tag2: Quad Word 0 - bits 32-63
2583 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
2584 const u64 cd_type_cmd_tso_mss,
2585 const u32 cd_tunneling, const u32 cd_l2tag2)
2586 {
2587 struct i40e_tx_context_desc *context_desc;
2588 int i = tx_ring->next_to_use;
2590 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
2591 !cd_tunneling && !cd_l2tag2)
2592 return;
2594 /* grab the next descriptor */
2595 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
2597 i++;
2598 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2600 /* cpu_to_le32 and assign to struct fields */
2601 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2602 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
2603 context_desc->rsvd = cpu_to_le16(0);
2604 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2605 }
2607 /**
2608 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
2609 * @tx_ring: the ring to be checked
2610 * @size: the size buffer we want to assure is available
2612 * Returns -EBUSY if a stop is needed, else 0
2613 **/
2614 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2615 {
2616 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2617 /* Memory barrier before checking head and tail */
2618 smp_mb();
2620 /* Check again in a case another CPU has just made room available. */
2621 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
2622 return -EBUSY;
2624 /* A reprieve! - use start_queue because it doesn't call schedule */
2625 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2626 ++tx_ring->tx_stats.restart_queue;
2627 return 0;
2628 }
2630 /**
2631 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
2632 * @skb: send buffer
2634 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
2635 * and so we need to figure out the cases where we need to linearize the skb.
2637 * For TSO we need to count the TSO header and segment payload separately.
2638 * As such we need to check cases where we have 7 fragments or more as we
2639 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2640 * the segment payload in the first descriptor, and another 7 for the
2641 * fragments.
2642 **/
2643 bool __i40e_chk_linearize(struct sk_buff *skb)
2644 {
2645 const struct skb_frag_struct *frag, *stale;
2646 int nr_frags, sum;
2648 /* no need to check if number of frags is less than 7 */
2649 nr_frags = skb_shinfo(skb)->nr_frags;
2650 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
2651 return false;
2653 /* We need to walk through the list and validate that each group
2654 * of 6 fragments totals at least gso_size.
2656 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
2657 frag = &skb_shinfo(skb)->frags[0];
2659 /* Initialize size to the negative value of gso_size minus 1. We
2660 * use this as the worst case scenario in which the frag ahead
2661 * of us only provides one byte which is why we are limited to 6
2662 * descriptors for a single transmit as the header and previous
2663 * fragment are already consuming 2 descriptors.
2664 */
2665 sum = 1 - skb_shinfo(skb)->gso_size;
2667 /* Add size of frags 0 through 4 to create our initial sum */
2668 sum += skb_frag_size(frag++);
2669 sum += skb_frag_size(frag++);
2670 sum += skb_frag_size(frag++);
2671 sum += skb_frag_size(frag++);
2672 sum += skb_frag_size(frag++);
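/* Example: with gso_size = 16000 and eight 2000-byte frags, sum starts
 * at 1 - 16000 = -15999; frags 0 through 5 only bring it up to -3999,
 * so the first check in the loop below sees a negative sum and the skb
 * gets linearized.
 */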
2674 /* Walk through fragments adding latest fragment, testing it, and
2675 * then removing stale fragments from the sum.
2677 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2678 int stale_size = skb_frag_size(stale);
2680 sum += skb_frag_size(frag++);
2682 /* The stale fragment may present us with a smaller
2683 * descriptor than the actual fragment size. To account
2684 * for that we need to remove all the data on the front and
2685 * figure out what the remainder would be in the last
2686 * descriptor associated with the fragment.
2687 */
2688 if (stale_size > I40E_MAX_DATA_PER_TXD) {
2689 int align_pad = -(stale->page_offset) &
2690 (I40E_MAX_READ_REQ_SIZE - 1);
2692 sum -= align_pad;
2693 stale_size -= align_pad;
2695 do {
2696 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
2697 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
2698 } while (stale_size > I40E_MAX_DATA_PER_TXD);
2699 }
2701 /* if sum is negative we failed to make sufficient progress */
2702 if (sum < 0)
2703 return true;
2705 if (!nr_frags--)
2706 break;
2708 sum -= stale_size;
2709 }
2711 return false;
2712 }
2714 /**
2715 * i40e_tx_map - Build the Tx descriptor
2716 * @tx_ring: ring to send buffer on
2717 * @skb: send buffer
2718 * @first: first buffer info buffer to use
2719 * @tx_flags: collected send information
2720 * @hdr_len: size of the packet header
2721 * @td_cmd: the command field in the descriptor
2722 * @td_offset: offset for checksum or crc
2723 **/
2724 #ifdef I40E_FCOE
2725 inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2726 struct i40e_tx_buffer *first, u32 tx_flags,
2727 const u8 hdr_len, u32 td_cmd, u32 td_offset)
2728 #else
2729 static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2730 struct i40e_tx_buffer *first, u32 tx_flags,
2731 const u8 hdr_len, u32 td_cmd, u32 td_offset)
2732 #endif
2733 {
2734 unsigned int data_len = skb->data_len;
2735 unsigned int size = skb_headlen(skb);
2736 struct skb_frag_struct *frag;
2737 struct i40e_tx_buffer *tx_bi;
2738 struct i40e_tx_desc *tx_desc;
2739 u16 i = tx_ring->next_to_use;
2740 u32 td_tag = 0;
2741 dma_addr_t dma;
2742 u16 gso_segs;
2743 u16 desc_count = 0;
2744 bool tail_bump = true;
2745 bool do_rs = false;
2747 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
2748 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
2749 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
2750 I40E_TX_FLAGS_VLAN_SHIFT;
2753 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
2754 gso_segs = skb_shinfo(skb)->gso_segs;
2755 else
2756 gso_segs = 1;
2758 /* multiply data chunks by size of headers */
2759 first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
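/* each TSO segment carries its own copy of the headers on the wire, so
 * the byte count is the payload plus one header per segment
 */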
2760 first->gso_segs = gso_segs;
2762 first->tx_flags = tx_flags;
2764 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2766 tx_desc = I40E_TX_DESC(tx_ring, i);
2767 tx_bi = first;
2769 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2770 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
2772 if (dma_mapping_error(tx_ring->dev, dma))
2773 goto dma_error;
2775 /* record length, and DMA address */
2776 dma_unmap_len_set(tx_bi, len, size);
2777 dma_unmap_addr_set(tx_bi, dma, dma);
2779 /* align size to end of page */
2780 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
2781 tx_desc->buffer_addr = cpu_to_le64(dma);
2783 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
2784 tx_desc->cmd_type_offset_bsz =
2785 build_ctob(td_cmd, td_offset,
2786 max_data, td_tag);
2788 tx_desc++;
2789 i++;
2790 desc_count++;
2792 if (i == tx_ring->count) {
2793 tx_desc = I40E_TX_DESC(tx_ring, 0);
2794 i = 0;
2795 }
2797 dma += max_data;
2798 size -= max_data;
2800 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
2801 tx_desc->buffer_addr = cpu_to_le64(dma);
2802 }
2804 if (likely(!data_len))
2805 break;
2807 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2808 size, td_tag);
2810 tx_desc++;
2811 i++;
2812 desc_count++;
2814 if (i == tx_ring->count) {
2815 tx_desc = I40E_TX_DESC(tx_ring, 0);
2816 i = 0;
2817 }
2819 size = skb_frag_size(frag);
2820 data_len -= size;
2822 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2823 DMA_TO_DEVICE);
2825 tx_bi = &tx_ring->tx_bi[i];
2826 }
2828 /* set next_to_watch value indicating a packet is present */
2829 first->next_to_watch = tx_desc;
2831 i++;
2832 if (i == tx_ring->count)
2833 i = 0;
2835 tx_ring->next_to_use = i;
2837 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
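/* stop the queue now if a worst-case packet would no longer fit; this
 * is cheaper than failing the next transmit and requeueing it
 */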
2838 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
2840 /* Algorithm to optimize tail and RS bit setting:
2841 * if xmit_more is supported
2842 * if xmit_more is true
2843 * do not update tail and do not mark RS bit.
2844 * if xmit_more is false and last xmit_more was false
2845 * if every packet spanned less than 4 desc
2846 * then set RS bit on 4th packet and update tail
2847 * on every packet
2848 * else
2849 * update tail and set RS bit on every packet.
2850 * if xmit_more is false and last_xmit_more was true
2851 * update tail and set RS bit.
2852 *
2853 * Optimization: wmb to be issued only in case of tail update.
2854 * Also optimize the Descriptor WB path for RS bit with the same
2855 * algorithm.
2856 *
2857 * Note: If there are less than 4 packets
2858 * pending and interrupts were disabled the service task will
2859 * trigger a force WB.
2860 */
2861 if (skb->xmit_more &&
2862 !netif_xmit_stopped(txring_txq(tx_ring))) {
2863 tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2864 tail_bump = false;
2865 } else if (!skb->xmit_more &&
2866 !netif_xmit_stopped(txring_txq(tx_ring)) &&
2867 (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
2868 (tx_ring->packet_stride < WB_STRIDE) &&
2869 (desc_count < WB_STRIDE)) {
2870 tx_ring->packet_stride++;
2871 } else {
2872 tx_ring->packet_stride = 0;
2873 tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2874 do_rs = true;
2875 }
2876 if (do_rs)
2877 tx_ring->packet_stride = 0;
2879 tx_desc->cmd_type_offset_bsz =
2880 build_ctob(td_cmd, td_offset, size, td_tag) |
2881 cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
2882 I40E_TX_DESC_CMD_EOP) <<
2883 I40E_TXD_QW1_CMD_SHIFT);
2885 /* notify HW of packet */
2886 if (!tail_bump) {
2887 prefetchw(tx_desc + 1);
2888 } else {
2889 /* Force memory writes to complete before letting h/w
2890 * know there are new descriptors to fetch. (Only
2891 * applicable for weak-ordered memory model archs,
2892 * such as IA-64).
2893 */
2894 wmb();
2895 writel(i, tx_ring->tail);
2896 }
2897 return;
2899 dma_error:
2900 dev_info(tx_ring->dev, "TX DMA map failed\n");
2902 /* clear dma mappings for failed tx_bi map */
2903 for (;;) {
2904 tx_bi = &tx_ring->tx_bi[i];
2905 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
2906 if (tx_bi == first)
2907 break;
2908 if (i == 0)
2909 i = tx_ring->count;
2910 i--;
2911 }
2913 tx_ring->next_to_use = i;
2914 }
2916 /**
2917 * i40e_xmit_frame_ring - Sends buffer on Tx ring
2918 * @skb: send buffer
2919 * @tx_ring: ring to send buffer on
2921 * Returns NETDEV_TX_OK if sent, else an error code
2922 **/
2923 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2924 struct i40e_ring *tx_ring)
2925 {
2926 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
2927 u32 cd_tunneling = 0, cd_l2tag2 = 0;
2928 struct i40e_tx_buffer *first;
2929 u32 td_offset = 0;
2930 u32 tx_flags = 0;
2931 __be16 protocol;
2932 u32 td_cmd = 0;
2933 u8 hdr_len = 0;
2934 int tso, count;
2935 int tsyn;
2937 /* prefetch the data, we'll need it later */
2938 prefetch(skb->data);
2940 count = i40e_xmit_descriptor_count(skb);
2941 if (i40e_chk_linearize(skb, count)) {
2942 if (__skb_linearize(skb))
2943 goto out_drop;
2944 count = i40e_txd_use_count(skb->len);
2945 tx_ring->tx_stats.tx_linearize++;
2946 }
2948 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
2949 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
2950 * + 4 desc gap to avoid the cache line where head is,
2951 * + 1 desc for context descriptor,
2952 * otherwise try next time
2953 */
2954 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2955 tx_ring->tx_stats.tx_busy++;
2956 return NETDEV_TX_BUSY;
2959 /* prepare the xmit flags */
2960 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2961 goto out_drop;
2963 /* obtain protocol of skb */
2964 protocol = vlan_get_protocol(skb);
2966 /* record the location of the first descriptor for this packet */
2967 first = &tx_ring->tx_bi[tx_ring->next_to_use];
2969 /* setup IPv4/IPv6 offloads */
2970 if (protocol == htons(ETH_P_IP))
2971 tx_flags |= I40E_TX_FLAGS_IPV4;
2972 else if (protocol == htons(ETH_P_IPV6))
2973 tx_flags |= I40E_TX_FLAGS_IPV6;
2975 tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
2977 if (tso < 0)
2978 goto out_drop;
2979 else if (tso)
2980 tx_flags |= I40E_TX_FLAGS_TSO;
2982 /* Always offload the checksum, since it's in the data descriptor */
2983 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
2984 tx_ring, &cd_tunneling);
2985 if (tso < 0)
2986 goto out_drop;
2988 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
2990 if (tsyn)
2991 tx_flags |= I40E_TX_FLAGS_TSYN;
2993 skb_tx_timestamp(skb);
2995 /* always enable CRC insertion offload */
2996 td_cmd |= I40E_TX_DESC_CMD_ICRC;
2998 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2999 cd_tunneling, cd_l2tag2);
3001 /* Add Flow Director ATR if it's enabled.
3003 * NOTE: this must always be directly before the data descriptor.
3004 */
3005 i40e_atr(tx_ring, skb, tx_flags);
3007 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3008 td_cmd, td_offset);
3010 return NETDEV_TX_OK;
3012 out_drop:
3013 dev_kfree_skb_any(skb);
3014 return NETDEV_TX_OK;
3015 }
3017 /**
3018 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
3019 * @skb: send buffer
3020 * @netdev: network interface device structure
3022 * Returns NETDEV_TX_OK if sent, else an error code
3023 **/
3024 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3025 {
3026 struct i40e_netdev_priv *np = netdev_priv(netdev);
3027 struct i40e_vsi *vsi = np->vsi;
3028 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
3030 /* hardware can't handle really short frames, hardware padding works
3031 * beyond this point
3032 */
3033 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3034 return NETDEV_TX_OK;
3036 return i40e_xmit_frame_ring(skb, tx_ring);
3037 }