drivers/net/ethernet/intel/ice/ice_txrx_lib.c (GNU Linux-libre 5.10.215-gnu1)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_txrx_lib.h"

/**
 * ice_release_rx_desc - Store the new next_to_use value and bump the tail
 * @rx_ring: ring to bump
 * @val: new next_to_use index to store
 */
void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val)
{
	u16 prev_ntu = rx_ring->next_to_use & ~0x7;

	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* QRX_TAIL will be updated with any tail value, but hardware ignores
	 * the lower 3 bits. This makes it so we only bump tail on meaningful
	 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
	 * the budget depending on the current traffic load.
	 */
	val &= ~0x7;
	if (prev_ntu != val) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(val, rx_ring->tail);
	}
}
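
/* Usage sketch (not from this file): the Rx buffer refill path in
 * ice_txrx.c, e.g. ice_alloc_rx_bufs(), calls this once it has
 * published new descriptors, roughly:
 *
 *	if (rx_ring->next_to_use != ntu)
 *		ice_release_rx_desc(rx_ring, ntu);
 */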

/**
 * ice_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 */
static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
{
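	/* ptype is not decoded into a specific L3/L4 hash type here, so
	 * be conservative and report an unknown hash type
	 */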
	return PKT_HASH_TYPE_NONE;
}

/**
 * ice_rx_hash - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u8 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

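	/* Only the flexible NIC metadata layout (ICE_RXDID_FLEX_NIC)
	 * carries the RSS hash at this descriptor offset; skip any
	 * other descriptor profile
	 */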
	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}

/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @ring: the ring we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */
static void
ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u16 rx_status0, rx_status1;
	bool ipv4, ipv6;

	rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
	rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW was able to parse the packet's L3/L4 headers */
	if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				   BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;

	if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 checksum errors; this also covers packets that the
	 * hardware was not able to checksum due to arrival speed
	 */
	if (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* check for outer UDP checksum error in tunneled packets */
	if ((rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
	    (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
		goto checksum_fail;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		break;
	}
	return;

checksum_fail:
	ring->vsi->back->hw_csum_rx_error++;
}

/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
void
ice_process_skb_fields(struct ice_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u8 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

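	/* skb->protocol is now set, which ice_rx_csum() documents as a
	 * precondition
	 */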
	ice_rx_csum(rx_ring, skb, rx_desc, ptype);
}

/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tag: VLAN tag for packet
 *
 * This function sends the completed packet (via skb) up the stack using
 * GRO receive functions (with/without VLAN tag).
 */
void
ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
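	/* Only hand the VLAN tag to the stack when CTAG Rx offload is
	 * enabled and the tag carries a non-zero VLAN ID
	 */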
	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	if (napi_gro_receive(&rx_ring->q_vector->napi, skb) == GRO_DROP) {
		/* this is tracked separately to help us debug stack drops */
		rx_ring->rx_stats.gro_dropped++;
		netdev_dbg(rx_ring->netdev, "Receive Queue %d: Dropped packet from GRO\n",
			   rx_ring->q_index);
	}
}

/**
 * ice_xmit_xdp_ring - submit a single packet to the XDP ring for transmission
 * @data: packet data pointer
 * @size: packet data size
 * @xdp_ring: XDP ring for transmission
 */
int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
{
	u16 i = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	dma_addr_t dma;

	if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
		xdp_ring->tx_stats.tx_busy++;
		return ICE_XDP_CONSUMED;
	}

	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(xdp_ring->dev, dma))
		return ICE_XDP_CONSUMED;

	tx_buf = &xdp_ring->tx_buf[i];
	tx_buf->bytecount = size;
	tx_buf->gso_segs = 1;
	tx_buf->raw_buf = data;

	/* record length and DMA address */
	dma_unmap_len_set(tx_buf, len, size);
	dma_unmap_addr_set(tx_buf, dma, dma);

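	/* ICE_TXD_LAST_DESC_CMD marks the descriptor as end-of-packet and
	 * requests a completion writeback so the Tx clean path can reclaim
	 * the buffer
	 */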
	tx_desc = ICE_TX_DESC(xdp_ring, i);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
						      size, 0);

	/* Make certain all of the status bits have been updated
	 * before next_to_watch is written.
	 */
	smp_wmb();

	i++;
	if (i == xdp_ring->count)
		i = 0;

	tx_buf->next_to_watch = tx_desc;
	xdp_ring->next_to_use = i;

	return ICE_XDP_TX;
}

/**
 * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it
 * @xdp: XDP buffer
 * @xdp_ring: XDP Tx ring
 *
 * Returns ICE_XDP_TX on success and ICE_XDP_CONSUMED on failure.
 */
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

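	/* conversion happens in place and yields NULL when the buffer
	 * cannot hold the xdp_frame metadata in its headroom
	 */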
	if (unlikely(!xdpf))
		return ICE_XDP_CONSUMED;

	return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
}

/**
 * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
 * @rx_ring: Rx ring
 * @xdp_res: Result of the receive batch
 *
 * This function bumps the XDP Tx tail and/or flushes the redirect map; it
 * should be called when a batch of packets has been processed in the
 * NAPI loop.
 */
void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res)
{
	if (xdp_res & ICE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_res & ICE_XDP_TX) {
		struct ice_ring *xdp_ring =
			rx_ring->vsi->xdp_rings[rx_ring->q_index];

		ice_xdp_ring_update_tail(xdp_ring);
	}
}
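
/* Usage sketch (not part of this file): the Rx clean routine in
 * ice_txrx.c ORs the per-packet XDP verdicts together and settles them
 * once per NAPI poll, roughly:
 *
 *	unsigned int xdp_xmit = 0;
 *
 *	while (more descriptors to clean) {
 *		...
 *		xdp_xmit |= xdp_res;	(ICE_XDP_TX and/or ICE_XDP_REDIR)
 *		...
 *	}
 *	ice_finalize_xdp_rx(rx_ring, xdp_xmit);
 */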