drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c (GNU Linux-libre 5.19-rc6-gnu)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
 *
 * RMNET Data MAP protocol
 */

#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/bitfield.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"

#define RMNET_MAP_DEAGGR_SPACING  64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)

static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
                                         const void *txporthdr)
{
        if (protocol == IPPROTO_TCP)
                return &((struct tcphdr *)txporthdr)->check;

        if (protocol == IPPROTO_UDP)
                return &((struct udphdr *)txporthdr)->check;

        return NULL;
}

static int
rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
                               struct rmnet_map_dl_csum_trailer *csum_trailer,
                               struct rmnet_priv *priv)
{
        struct iphdr *ip4h = (struct iphdr *)skb->data;
        void *txporthdr = skb->data + ip4h->ihl * 4;
        __sum16 *csum_field, pseudo_csum;
        __sum16 ip_payload_csum;

        /* Computing the checksum over just the IPv4 header--including its
         * checksum field--should yield 0.  If it doesn't, the IP header
         * is bad, so return an error and let the IP layer drop it.
         */
        if (ip_fast_csum(ip4h, ip4h->ihl)) {
                priv->stats.csum_ip4_header_bad++;
                return -EINVAL;
        }
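
        /* A worked example of the property checked above: suppose the 16-bit
         * words of a 20-byte header, with the checksum field taken as zero,
         * sum (with end-around carry) to 0x9d2e.  The stored header checksum
         * is then ~0x9d2e = 0x62d1, adding it back in gives 0xffff, and
         * ip_fast_csum() folds and complements that to 0.
         */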

        /* We don't support checksum offload on IPv4 fragments */
        if (ip_is_fragment(ip4h)) {
                priv->stats.csum_fragmented_pkt++;
                return -EOPNOTSUPP;
        }

        /* Checksum offload is only supported for UDP and TCP protocols */
        csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
        if (!csum_field) {
                priv->stats.csum_err_invalid_transport++;
                return -EPROTONOSUPPORT;
        }

        /* RFC 768: UDP checksum is optional for IPv4, and is 0 if unused */
        if (!*csum_field && ip4h->protocol == IPPROTO_UDP) {
                priv->stats.csum_skipped++;
                return 0;
        }

        /* The checksum value in the trailer is computed over the entire
         * IP packet, including the IP header and payload.  To derive the
         * transport checksum from this, we first subtract the contribution
         * of the IP header from the trailer checksum.  We then add the
         * checksum computed over the pseudo header.
         *
         * We verified above that the IP header contributes zero to the
         * trailer checksum.  Therefore the checksum in the trailer is
         * just the checksum computed over the IP payload.
         *
         * If the IP payload arrives intact, adding the pseudo header
         * checksum to the IP payload checksum will yield 0xffff (negative
         * zero).  This means the trailer checksum and the pseudo checksum
         * are additive inverses of each other.  Put another way, the
         * message passes the checksum test if the trailer checksum value
         * is the negated pseudo header checksum.
         *
         * Knowing this, we don't even need to examine the transport
         * header checksum value; it is already accounted for in the
         * checksum value found in the trailer.
         */
        ip_payload_csum = csum_trailer->csum_value;

        pseudo_csum = csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
                                        ntohs(ip4h->tot_len) - ip4h->ihl * 4,
                                        ip4h->protocol, 0);
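
        /* A worked example of this identity: if the pseudo header checksum
         * computes to 0x1b3f, then for an intact payload the trailer value
         * must be ~0x1b3f = 0xe4c0, because the payload checksum plus the
         * pseudo header checksum folds to 0xffff (negative zero).
         */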

        /* The cast is required to ensure only the low 16 bits are examined */
        if (ip_payload_csum != (__sum16)~pseudo_csum) {
                priv->stats.csum_validation_failed++;
                return -EINVAL;
        }

        priv->stats.csum_ok++;
        return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
                               struct rmnet_map_dl_csum_trailer *csum_trailer,
                               struct rmnet_priv *priv)
{
        struct ipv6hdr *ip6h = (struct ipv6hdr *)skb->data;
        void *txporthdr = skb->data + sizeof(*ip6h);
        __sum16 *csum_field, pseudo_csum;
        __sum16 ip6_payload_csum;
        __be16 ip_header_csum;

        /* Checksum offload is only supported for UDP and TCP protocols;
         * the packet cannot include any IPv6 extension headers
         */
        csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
        if (!csum_field) {
                priv->stats.csum_err_invalid_transport++;
                return -EPROTONOSUPPORT;
        }

        /* The checksum value in the trailer is computed over the entire
         * IP packet, including the IP header and payload.  To derive the
         * transport checksum from this, we first subtract the contribution
         * of the IP header from the trailer checksum.  We then add the
         * checksum computed over the pseudo header.
         */
        ip_header_csum = (__force __be16)ip_fast_csum(ip6h, sizeof(*ip6h) / 4);
        ip6_payload_csum = csum16_sub(csum_trailer->csum_value, ip_header_csum);

        pseudo_csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                      ntohs(ip6h->payload_len),
                                      ip6h->nexthdr, 0);
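
        /* csum16_sub() performs the subtraction in the same one's-complement
         * arithmetic, folding (a + ~b) back into 16 bits.  For example,
         * removing an IP header contribution of 0x2222 from a trailer value
         * of 0x9999 gives csum16_sub(0x9999, 0x2222) == 0x7777, the checksum
         * over the payload alone.
         */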

        /* It's sufficient to compare the IP payload checksum with the
         * negated pseudo checksum to determine whether the packet
         * checksum was good.  (See further explanation in comments
         * in rmnet_map_ipv4_dl_csum_trailer()).
         *
         * The cast is required to ensure only the low 16 bits are
         * examined.
         */
        if (ip6_payload_csum != (__sum16)~pseudo_csum) {
                priv->stats.csum_validation_failed++;
                return -EINVAL;
        }

        priv->stats.csum_ok++;
        return 0;
}
#else
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
                               struct rmnet_map_dl_csum_trailer *csum_trailer,
                               struct rmnet_priv *priv)
{
        return 0;
}
#endif

static void rmnet_map_complement_ipv4_txporthdr_csum_field(struct iphdr *ip4h)
{
        void *txphdr;
        u16 *csum;

        txphdr = (void *)ip4h + ip4h->ihl * 4;

        if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
                csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
                *csum = ~(*csum);
        }
}

static void
rmnet_map_ipv4_ul_csum_header(struct iphdr *iphdr,
                              struct rmnet_map_ul_csum_header *ul_header,
                              struct sk_buff *skb)
{
        u16 val;

        val = MAP_CSUM_UL_ENABLED_FLAG;
        if (iphdr->protocol == IPPROTO_UDP)
                val |= MAP_CSUM_UL_UDP_FLAG;
        val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;

        ul_header->csum_start_offset = htons(skb_network_header_len(skb));
        ul_header->csum_info = htons(val);

        skb->ip_summed = CHECKSUM_NONE;

        rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
}
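
/* For illustration: given a TCP segment behind a 20-byte IPv4 header, the
 * helper above produces csum_start_offset = htons(20) and a csum_info word
 * with MAP_CSUM_UL_ENABLED_FLAG set and skb->csum_offset (16 for TCP, since
 * tcphdr.check sits 16 bytes into the transport header) in the low bits.
 * For UDP, MAP_CSUM_UL_UDP_FLAG is also set and csum_offset is 6.
 */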

#if IS_ENABLED(CONFIG_IPV6)
static void
rmnet_map_complement_ipv6_txporthdr_csum_field(struct ipv6hdr *ip6h)
{
        void *txphdr;
        u16 *csum;

        txphdr = ip6h + 1;

        if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
                csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
                *csum = ~(*csum);
        }
}

static void
rmnet_map_ipv6_ul_csum_header(struct ipv6hdr *ipv6hdr,
                              struct rmnet_map_ul_csum_header *ul_header,
                              struct sk_buff *skb)
{
        u16 val;

        val = MAP_CSUM_UL_ENABLED_FLAG;
        if (ipv6hdr->nexthdr == IPPROTO_UDP)
                val |= MAP_CSUM_UL_UDP_FLAG;
        val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;

        ul_header->csum_start_offset = htons(skb_network_header_len(skb));
        ul_header->csum_info = htons(val);

        skb->ip_summed = CHECKSUM_NONE;

        rmnet_map_complement_ipv6_txporthdr_csum_field(ipv6hdr);
}
#else
static void
rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
                              struct rmnet_map_ul_csum_header *ul_header,
                              struct sk_buff *skb)
{
}
#endif

static void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
                                                struct rmnet_port *port,
                                                struct net_device *orig_dev)
{
        struct rmnet_priv *priv = netdev_priv(orig_dev);
        struct rmnet_map_v5_csum_header *ul_header;

        ul_header = skb_push(skb, sizeof(*ul_header));
        memset(ul_header, 0, sizeof(*ul_header));
        ul_header->header_info = u8_encode_bits(RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD,
                                                MAPV5_HDRINFO_HDR_TYPE_FMASK);

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                void *iph = ip_hdr(skb);
                __sum16 *check;
                void *trans;
                u8 proto;

                if (skb->protocol == htons(ETH_P_IP)) {
                        u16 ip_len = ((struct iphdr *)iph)->ihl * 4;

                        proto = ((struct iphdr *)iph)->protocol;
                        trans = iph + ip_len;
                } else if (IS_ENABLED(CONFIG_IPV6) &&
                           skb->protocol == htons(ETH_P_IPV6)) {
                        u16 ip_len = sizeof(struct ipv6hdr);

                        proto = ((struct ipv6hdr *)iph)->nexthdr;
                        trans = iph + ip_len;
                } else {
                        priv->stats.csum_err_invalid_ip_version++;
                        goto sw_csum;
                }

                check = rmnet_map_get_csum_field(proto, trans);
                if (check) {
                        skb->ip_summed = CHECKSUM_NONE;
                        /* Ask for checksum offloading */
                        ul_header->csum_info |= MAPV5_CSUMINFO_VALID_FLAG;
                        priv->stats.csum_hw++;
                        return;
                }
        }

sw_csum:
        priv->stats.csum_sw++;
}
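
/* Note that the MAPv5 "valid" bit in csum_info carries opposite meanings by
 * direction: set on the uplink (as above) it asks the hardware to compute
 * the transport checksum; set by the hardware on the downlink it reports
 * that the checksum has already been validated (see
 * rmnet_map_process_next_hdr_packet() below).
 */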

/* Adds MAP header to front of skb->data
 * Padding is calculated and set appropriately in MAP header. Mux ID is
 * initialized to 0.
 */
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
                                                  int hdrlen,
                                                  struct rmnet_port *port,
                                                  int pad)
{
        struct rmnet_map_header *map_header;
        u32 padding, map_datalen;

        map_datalen = skb->len - hdrlen;
        map_header = (struct rmnet_map_header *)
                        skb_push(skb, sizeof(struct rmnet_map_header));
        memset(map_header, 0, sizeof(struct rmnet_map_header));

        /* Set next_hdr bit for csum offload packets */
        if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
                map_header->flags |= MAP_NEXT_HEADER_FLAG;

        if (pad == RMNET_MAP_NO_PAD_BYTES) {
                map_header->pkt_len = htons(map_datalen);
                return map_header;
        }

        BUILD_BUG_ON(MAP_PAD_LEN_MASK < 3);
        padding = ALIGN(map_datalen, 4) - map_datalen;

        if (padding == 0)
                goto done;

        if (skb_tailroom(skb) < padding)
                return NULL;

        skb_put_zero(skb, padding);

done:
        map_header->pkt_len = htons(map_datalen + padding);
        /* This is a data packet, so the CMD bit is 0.  OR in the pad length
         * rather than assigning it, so the next_hdr bit set above survives.
         */
        map_header->flags |= padding & MAP_PAD_LEN_MASK;

        return map_header;
}
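
/* Padding example: a 61-byte MAP payload is aligned up to ALIGN(61, 4) = 64
 * bytes, so padding = 3.  Three zero bytes are appended to the skb, pkt_len
 * becomes htons(64), and the pad length 3 lands in the low bits of the flags
 * byte via MAP_PAD_LEN_MASK.
 */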

/* Deaggregates a single packet
 * A whole new buffer is allocated for each portion of an aggregated frame.
 * Caller should keep calling deaggregate() on the source skb until NULL is
 * returned, indicating that there are no more packets to deaggregate. Caller
 * is responsible for freeing the original skb.
 */
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
                                      struct rmnet_port *port)
{
        struct rmnet_map_v5_csum_header *next_hdr = NULL;
        struct rmnet_map_header *maph;
        void *data = skb->data;
        struct sk_buff *skbn;
        u8 nexthdr_type;
        u32 packet_len;

        if (skb->len == 0)
                return NULL;

        maph = (struct rmnet_map_header *)skb->data;
        packet_len = ntohs(maph->pkt_len) + sizeof(*maph);

        if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
                packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
        } else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) {
                if (!(maph->flags & MAP_CMD_FLAG)) {
                        packet_len += sizeof(*next_hdr);
                        if (maph->flags & MAP_NEXT_HEADER_FLAG)
                                next_hdr = data + sizeof(*maph);
                        else
                                /* Mapv5 data pkt without csum hdr is invalid */
                                return NULL;
                }
        }

        if (((int)skb->len - (int)packet_len) < 0)
                return NULL;

        /* Some hardware can send us empty frames. Catch them */
        if (!maph->pkt_len)
                return NULL;

        if (next_hdr) {
                nexthdr_type = u8_get_bits(next_hdr->header_info,
                                           MAPV5_HDRINFO_HDR_TYPE_FMASK);
                if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
                        return NULL;
        }

        skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
        if (!skbn)
                return NULL;

        skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
        skb_put(skbn, packet_len);
        memcpy(skbn->data, skb->data, packet_len);
        skb_pull(skb, packet_len);

        return skbn;
}
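
/* A sketch of the intended calling pattern, modeled on the ingress handler
 * in rmnet_handlers.c (error handling elided):
 *
 *      while (skb->len != 0) {
 *              skbn = rmnet_map_deaggregate(skb, port);
 *              if (!skbn)
 *                      break;
 *              __rmnet_map_ingress_handler(skbn, port);
 *      }
 *      consume_skb(skb);
 */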

/* Validates packet checksums. Function takes a pointer to
 * the beginning of a buffer which contains the IP payload +
 * padding + checksum trailer.
 * Only IPv4 and IPv6 are supported along with TCP & UDP.
 * Fragmented or tunneled packets are not supported.
 */
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
{
        struct rmnet_priv *priv = netdev_priv(skb->dev);
        struct rmnet_map_dl_csum_trailer *csum_trailer;

        if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
                priv->stats.csum_sw++;
                return -EOPNOTSUPP;
        }

        csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);

        if (!(csum_trailer->flags & MAP_CSUM_DL_VALID_FLAG)) {
                priv->stats.csum_valid_unset++;
                return -EINVAL;
        }

        if (skb->protocol == htons(ETH_P_IP))
                return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);

        if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6))
                return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);

        priv->stats.csum_err_invalid_ip_version++;

        return -EPROTONOSUPPORT;
}
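
/* The downlink buffer layout assumed above, where len covers everything
 * that precedes the trailer:
 *
 *      skb->data                     skb->data + len
 *      |                             |
 *      v                             v
 *      +-----------------------------+---------------------------+
 *      | IP header + payload (+ pad) | rmnet_map_dl_csum_trailer |
 *      +-----------------------------+---------------------------+
 */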

static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
                                                struct net_device *orig_dev)
{
        struct rmnet_priv *priv = netdev_priv(orig_dev);
        struct rmnet_map_ul_csum_header *ul_header;
        void *iphdr;

        ul_header = (struct rmnet_map_ul_csum_header *)
                    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));

        if (unlikely(!(orig_dev->features &
                     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
                goto sw_csum;

        if (skb->ip_summed != CHECKSUM_PARTIAL)
                goto sw_csum;

        iphdr = (char *)ul_header +
                sizeof(struct rmnet_map_ul_csum_header);

        if (skb->protocol == htons(ETH_P_IP)) {
                rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
                priv->stats.csum_hw++;
                return;
        }

        if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) {
                rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
                priv->stats.csum_hw++;
                return;
        }

        priv->stats.csum_err_invalid_ip_version++;

sw_csum:
        memset(ul_header, 0, sizeof(*ul_header));

        priv->stats.csum_sw++;
}

/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 * packets that are supported for UL checksum offload.
 */
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
                                      struct rmnet_port *port,
                                      struct net_device *orig_dev,
                                      int csum_type)
{
        switch (csum_type) {
        case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
                rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
                break;
        case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
                rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev);
                break;
        default:
                break;
        }
}

/* Process a MAPv5 packet header */
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
                                      u16 len)
{
        struct rmnet_priv *priv = netdev_priv(skb->dev);
        struct rmnet_map_v5_csum_header *next_hdr;
        u8 nexthdr_type;

        next_hdr = (struct rmnet_map_v5_csum_header *)(skb->data +
                        sizeof(struct rmnet_map_header));

        nexthdr_type = u8_get_bits(next_hdr->header_info,
                                   MAPV5_HDRINFO_HDR_TYPE_FMASK);

        if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
                return -EINVAL;

        if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
                priv->stats.csum_sw++;
        } else if (next_hdr->csum_info & MAPV5_CSUMINFO_VALID_FLAG) {
                priv->stats.csum_ok++;
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else {
                priv->stats.csum_valid_unset++;
        }

        /* Pull csum v5 header */
        skb_pull(skb, sizeof(*next_hdr));

        return 0;
}