/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>
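
/* GRO receive handler for ESP: parse the SPI and sequence number, attach
 * the matching xfrm state to the secpath and feed the skb to xfrm_input()
 * for decryption. ERR_PTR(-EINPROGRESS) tells the GRO layer that the skb
 * has been consumed; on any failure the packet is flushed back to the
 * regular receive path.
 */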
static struct sk_buff **esp4_gro_receive(struct sk_buff **head,
                                         struct sk_buff *skb)
{
        int offset = skb_gro_offset(skb);
        struct xfrm_offload *xo;
        struct xfrm_state *x;
        __be32 seq;
        __be32 spi;
        int err;

        if (!pskb_pull(skb, offset))
                return NULL;

        if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
                goto out;

        xo = xfrm_offload(skb);
        if (!xo || !(xo->flags & CRYPTO_DONE)) {
                err = secpath_set(skb);
                if (err)
                        goto out;

                if (skb->sp->len == XFRM_MAX_DEPTH)
                        goto out;

                x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
                                      (xfrm_address_t *)&ip_hdr(skb)->daddr,
                                      spi, IPPROTO_ESP, AF_INET);
                if (!x)
                        goto out;

                skb->sp->xvec[skb->sp->len++] = x;
                skb->sp->olen++;

                xo = xfrm_offload(skb);
                if (!xo) {
                        xfrm_state_put(x);
                        goto out;
                }
        }

        xo->flags |= XFRM_GRO;

        XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
        XFRM_SPI_SKB_CB(skb)->seq = seq;

        /* We don't need to handle errors from xfrm_input; it does all
         * the error handling and frees the resources on error.
         */
        xfrm_input(skb, IPPROTO_ESP, spi, -2);

        return ERR_PTR(-EINPROGRESS);
out:
        skb_push(skb, offset);
        NAPI_GRO_CB(skb)->same_flow = 0;
        NAPI_GRO_CB(skb)->flush = 1;

        return NULL;
}
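
/* Prepend the outer ESP header for a GSO packet: save the inner protocol
 * in the offload context and fill in the SPI and the low 32 bits of the
 * output sequence number. Encryption happens later, in esp_xmit() or in
 * the hardware.
 */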
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct iphdr *iph = ip_hdr(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        int proto = iph->protocol;

        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        xo->proto = proto;
}
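
/* Segment an ESP GSO skb and prepare every resulting segment for
 * transmission: mark it as an ESP GSO segment, assign the per-segment
 * sequence numbers and run the outer mode and ESP type offload xmit
 * hooks. Without NETIF_F_HW_ESP the crypto is flagged for software
 * fallback.
 */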
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        __u32 seq;
        int err = 0;
        struct sk_buff *skb2;
        struct xfrm_state *x;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (!xo)
                goto out;

        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
                goto out;

        seq = xo->seq.low;

        x = skb->sp->xvec[skb->sp->len - 1];
        aead = x->data;
        esph = ip_esp_hdr(skb);

        if (esph->spi != x->id.spi)
                goto out;

        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
                goto out;

        __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

        skb->encap_hdr_csum = 1;

        if (!(features & NETIF_F_HW_ESP))
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

        segs = x->outer_mode->gso_segment(x, skb, esp_features);
        if (IS_ERR_OR_NULL(segs))
                goto out;

        __skb_pull(skb, skb->data - skb_mac_header(skb));

        skb2 = segs;
        do {
                struct sk_buff *nskb = skb2->next;

                xo = xfrm_offload(skb2);
                xo->flags |= XFRM_GSO_SEGMENT;
                xo->seq.low = seq;
                xo->seq.hi = xfrm_replay_seqhi(x, seq);

                if (!(features & NETIF_F_HW_ESP))
                        xo->flags |= CRYPTO_FALLBACK;

                x->outer_mode->xmit(x, skb2);

                err = x->type_offload->xmit(x, skb2, esp_features);
                if (err) {
                        kfree_skb_list(segs);
                        return ERR_PTR(err);
                }

                if (!skb_is_gso(skb2))
                        seq++;
                else
                        seq += skb_shinfo(skb2)->gso_segs;

                skb_push(skb2, skb2->mac_len);
                skb2 = nskb;
        } while (skb2);

out:
        return segs;
}
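
/* Called after decryption to finish input processing: make sure the ESP
 * header and IV are in the linear part of the skb and let
 * esp_input_done2() strip padding and trailer.
 */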
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
        struct crypto_aead *aead = x->data;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
                return -EINVAL;

        if (!(xo->flags & CRYPTO_DONE))
                skb->ip_summed = CHECKSUM_NONE;

        return esp_input_done2(skb, 0);
}
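
/* Output handler for ESP offload: compute pad and trailer lengths, build
 * the ESP header and, unless a crypto-capable device takes the packet,
 * do the encryption in software via esp_output_tail().
 */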
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
        int err;
        int alen;
        int blksize;
        struct xfrm_offload *xo;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct esp_info esp;
        bool hw_offload = true;

        esp.inplace = true;

        xo = xfrm_offload(skb);
        if (!xo)
                return -EINVAL;

        if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
            (x->xso.dev != skb->dev)) {
                xo->flags |= CRYPTO_FALLBACK;
                hw_offload = false;
        }

        esp.proto = xo->proto;

        /* skb is pure payload to encrypt */
        aead = x->data;
        alen = crypto_aead_authsize(aead);

        esp.tfclen = 0;
        /* XXX: Add support for tfc padding here. */

        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;

        esp.esph = ip_esp_hdr(skb);

        if (!hw_offload || !skb_is_gso(skb)) {
                esp.nfrags = esp_output_head(x, skb, &esp);
                if (esp.nfrags < 0)
                        return esp.nfrags;
        }

        esph = esp.esph;
        esph->spi = x->id.spi;

        skb_push(skb, -skb_network_offset(skb));

        if (xo->flags & XFRM_GSO_SEGMENT) {
                esph->seq_no = htonl(xo->seq.low);

                ip_hdr(skb)->tot_len = htons(skb->len);
                ip_send_check(ip_hdr(skb));
        }

        if (hw_offload)
                return 0;

        esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));

        err = esp_output_tail(x, skb, &esp);
        if (err < 0)
                return err;

        secpath_reset(skb);

        return 0;
}
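
/* GRO/GSO callbacks registered with the inet offload layer. */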
static const struct net_offload esp4_offload = {
        .callbacks = {
                .gro_receive = esp4_gro_receive,
                .gso_segment = esp4_gso_segment,
        },
};

static const struct xfrm_type_offload esp_type_offload = {
        .description    = "ESP4 OFFLOAD",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .input_tail     = esp_input_tail,
        .xmit           = esp_xmit,
        .encap          = esp4_gso_encap,
};
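
/* Module init/exit: register the ESP type offload with xfrm and the ESP
 * protocol offload with the inet layer, and tear both down on unload.
 */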
static int __init esp4_offload_init(void)
{
        if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
                pr_info("%s: can't add xfrm type offload\n", __func__);
                return -EAGAIN;
        }

        return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
        if (xfrm_unregister_type_offload(&esp_type_offload, AF_INET) < 0)
                pr_info("%s: can't remove xfrm type offload\n", __func__);

        inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);