// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_output.c - Common IPsec encapsulation code.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_route.h>
#include <net/ipv6_stubs.h>
#endif

#include "xfrm_inout.h"

static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb);
static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);

static int xfrm_skb_check_space(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev)
                - skb_headroom(skb);
        int ntail = dst->dev->needed_tailroom - skb_tailroom(skb);

        if (nhead <= 0) {
                if (ntail <= 0)
                        return 0;
                nhead = 0;
        } else if (ntail < 0)
                ntail = 0;

        return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
}

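/* Worked example (hypothetical numbers): with dst->header_len = 48,
 * LL_RESERVED_SPACE(dst->dev) = 16 and skb_headroom(skb) = 32, the
 * computation above gives
 *
 *      nhead = 48 + 16 - 32 = 32
 *
 * and pskb_expand_head() grows the headroom by 32 bytes.  When both
 * nhead and ntail come out <= 0, the skb already fits and is reused
 * as-is.
 */
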
/* Children define the path of the packet through the
 * Linux networking.  Thus, destinations are stackable.
 */

static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
        struct dst_entry *child = dst_clone(xfrm_dst_child(skb_dst(skb)));

        skb_dst_drop(skb);
        return child;
}

/* Add encapsulation header.
 *
 * The IP header will be moved forward to make space for the encapsulation
 * header.
 */
static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
        struct iphdr *iph = ip_hdr(skb);
        int ihl = iph->ihl * 4;

        skb_set_inner_transport_header(skb, skb_transport_offset(skb));

        skb_set_network_header(skb, -x->props.header_len);
        skb->mac_header = skb->network_header +
                          offsetof(struct iphdr, protocol);
        skb->transport_header = skb->network_header + ihl;
        __skb_pull(skb, ihl);
        memmove(skb_network_header(skb), iph, ihl);
        return 0;
}

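/* Layout sketch (illustrative, hypothetical sizes): with a 20 byte inner
 * IPv4 header and x->props.header_len = 8, the IP header is moved 8 bytes
 * towards the head of the buffer:
 *
 *      before:          [ iphdr | payload ]
 *      after:  [ iphdr | gap(8) | payload ]
 *
 * network_header then points at the moved iphdr and transport_header at
 * the 8 byte gap, which x->type->output later fills with the actual
 * encapsulation header (e.g. ESP).
 */
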
#if IS_ENABLED(CONFIG_IPV6_MIP6)
static int mip6_rthdr_offset(struct sk_buff *skb, u8 **nexthdr, int type)
{
        const unsigned char *nh = skb_network_header(skb);
        unsigned int offset = sizeof(struct ipv6hdr);
        unsigned int packet_len;
        int found_rhdr = 0;

        packet_len = skb_tail_pointer(skb) - nh;
        *nexthdr = &ipv6_hdr(skb)->nexthdr;

        while (offset <= packet_len) {
                struct ipv6_opt_hdr *exthdr;

                switch (**nexthdr) {
                case NEXTHDR_HOP:
                        break;
                case NEXTHDR_ROUTING:
                        if (type == IPPROTO_ROUTING && offset + 3 <= packet_len) {
                                struct ipv6_rt_hdr *rt;

                                rt = (struct ipv6_rt_hdr *)(nh + offset);
                                if (rt->type != 0)
                                        return offset;
                        }
                        found_rhdr = 1;
                        break;
                case NEXTHDR_DEST:
                        /* HAO MUST NOT appear more than once.
                         * XXX: It is better to try to find by the end of
                         * XXX: packet if HAO exists.
                         */
                        if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) {
                                net_dbg_ratelimited("mip6: hao exists already, override\n");
                                return offset;
                        }

                        if (found_rhdr)
                                return offset;

                        break;
                default:
                        return offset;
                }

                if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
                        return -EINVAL;

                exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
                                                 offset);
                offset += ipv6_optlen(exthdr);
                if (offset > IPV6_MAXPLEN)
                        return -EINVAL;
                *nexthdr = &exthdr->nexthdr;
        }

        return -EINVAL;
}
#endif

#if IS_ENABLED(CONFIG_IPV6)
static int xfrm6_hdr_offset(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr)
{
        switch (x->type->proto) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
        case IPPROTO_DSTOPTS:
        case IPPROTO_ROUTING:
                return mip6_rthdr_offset(skb, prevhdr, x->type->proto);
#endif
        default:
                break;
        }

        return ip6_find_1stfragopt(skb, prevhdr);
}
#endif

/* Add encapsulation header.
 *
 * The IP header and mutable extension headers will be moved forward to make
 * space for the encapsulation header.
 */
static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
        struct ipv6hdr *iph;
        u8 *prevhdr;
        int hdr_len;

        iph = ipv6_hdr(skb);
        skb_set_inner_transport_header(skb, skb_transport_offset(skb));

        hdr_len = xfrm6_hdr_offset(x, skb, &prevhdr);
        if (hdr_len < 0)
                return hdr_len;
        skb_set_mac_header(skb,
                           (prevhdr - x->props.header_len) - skb->data);
        skb_set_network_header(skb, -x->props.header_len);
        skb->transport_header = skb->network_header + hdr_len;
        __skb_pull(skb, hdr_len);
        memmove(ipv6_hdr(skb), iph, hdr_len);
        return 0;
#else
        WARN_ON_ONCE(1);
        return -EAFNOSUPPORT;
#endif
}

/* Add route optimization header space.
 *
 * The IP header and mutable extension headers will be moved forward to make
 * space for the route optimization header.
 */
static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
        struct ipv6hdr *iph;
        u8 *prevhdr;
        int hdr_len;

        iph = ipv6_hdr(skb);

        hdr_len = xfrm6_hdr_offset(x, skb, &prevhdr);
        if (hdr_len < 0)
                return hdr_len;
        skb_set_mac_header(skb,
                           (prevhdr - x->props.header_len) - skb->data);
        skb_set_network_header(skb, -x->props.header_len);
        skb->transport_header = skb->network_header + hdr_len;
        __skb_pull(skb, hdr_len);
        memmove(ipv6_hdr(skb), iph, hdr_len);

        x->lastused = ktime_get_real_seconds();

        return 0;
#else
        WARN_ON_ONCE(1);
        return -EAFNOSUPPORT;
#endif
}

/* Add encapsulation header.
 *
 * The top IP header will be constructed per draft-nikander-esp-beet-mode-06.txt.
 */
static int xfrm4_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_beet_phdr *ph;
        struct iphdr *top_iph;
        int hdrlen, optlen;

        hdrlen = 0;
        optlen = XFRM_MODE_SKB_CB(skb)->optlen;
        if (unlikely(optlen))
                hdrlen += IPV4_BEET_PHMAXLEN - (optlen & 4);

        skb_set_network_header(skb, -x->props.header_len - hdrlen +
                               (XFRM_MODE_SKB_CB(skb)->ihl - sizeof(*top_iph)));
        if (x->sel.family != AF_INET6)
                skb->network_header += IPV4_BEET_PHMAXLEN;
        skb->mac_header = skb->network_header +
                          offsetof(struct iphdr, protocol);
        skb->transport_header = skb->network_header + sizeof(*top_iph);

        xfrm4_beet_make_header(skb);

        ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdrlen);

        top_iph = ip_hdr(skb);

        if (unlikely(optlen)) {
                if (WARN_ON(optlen < 0))
                        return -EINVAL;

                ph->padlen = 4 - (optlen & 4);
                ph->hdrlen = optlen / 8;
                ph->nexthdr = top_iph->protocol;
                if (ph->padlen)
                        memset(ph + 1, IPOPT_NOP, ph->padlen);

                top_iph->protocol = IPPROTO_BEETPH;
                top_iph->ihl = sizeof(struct iphdr) / 4;
        }

        top_iph->saddr = x->props.saddr.a4;
        top_iph->daddr = x->id.daddr.a4;

        return 0;
}

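/* Pseudo-header padding, worked through (illustrative): IP options are
 * 4-byte aligned, so optlen % 8 is either 0 or 4 and "optlen & 4" selects
 * the padding needed to keep the whole pseudo header block a multiple of
 * 8 bytes:
 *
 *      optlen = 8:  padlen = 4 - (8 & 4)  = 4 -> 4 + 4 + 8  = 16 bytes
 *      optlen = 12: padlen = 4 - (12 & 4) = 0 -> 4 + 0 + 12 = 16 bytes
 *
 * i.e. sizeof(*ph) + padlen + optlen == (ph->hdrlen + 1) * 8 in both
 * cases, with ph->hdrlen = optlen / 8 = 1.
 */
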
/* Add encapsulation header.
 *
 * The top IP header will be constructed per RFC 2401.
 */
static int xfrm4_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
        bool small_ipv6 = (skb->protocol == htons(ETH_P_IPV6)) && (skb->len <= IPV6_MIN_MTU);
        struct dst_entry *dst = skb_dst(skb);
        struct iphdr *top_iph;
        int flags;

        skb_set_inner_network_header(skb, skb_network_offset(skb));
        skb_set_inner_transport_header(skb, skb_transport_offset(skb));

        skb_set_network_header(skb, -x->props.header_len);
        skb->mac_header = skb->network_header +
                          offsetof(struct iphdr, protocol);
        skb->transport_header = skb->network_header + sizeof(*top_iph);
        top_iph = ip_hdr(skb);

        top_iph->ihl = 5;
        top_iph->version = 4;

        top_iph->protocol = xfrm_af2proto(skb_dst(skb)->ops->family);

        /* DS disclosing depends on XFRM_SA_XFLAG_DONT_ENCAP_DSCP */
        if (x->props.extra_flags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP)
                top_iph->tos = 0;
        else
                top_iph->tos = XFRM_MODE_SKB_CB(skb)->tos;
        top_iph->tos = INET_ECN_encapsulate(top_iph->tos,
                                            XFRM_MODE_SKB_CB(skb)->tos);

        flags = x->props.flags;
        if (flags & XFRM_STATE_NOECN)
                IP_ECN_clear(top_iph);

        top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) || small_ipv6 ?
                0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));

        top_iph->ttl = ip4_dst_hoplimit(xfrm_dst_child(dst));

        top_iph->saddr = x->props.saddr.a4;
        top_iph->daddr = x->id.daddr.a4;
        ip_select_ident(dev_net(dst->dev), skb, NULL);

        return 0;
}

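/* ECN mapping performed by INET_ECN_encapsulate() above (the outer DSCP
 * chosen before is kept, the inner ECN bits are copied, except that a CE
 * mark is not leaked into the outer header):
 *
 *      inner Not-ECT (0) -> outer Not-ECT (0)
 *      inner ECT(1)  (1) -> outer ECT(1)  (1)
 *      inner ECT(0)  (2) -> outer ECT(0)  (2)
 *      inner CE      (3) -> outer ECT(0)  (2)
 *
 * XFRM_STATE_NOECN then clears the outer ECN bits entirely.
 */
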
#if IS_ENABLED(CONFIG_IPV6)
static int xfrm6_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct ipv6hdr *top_iph;
        int dsfield;

        skb_set_inner_network_header(skb, skb_network_offset(skb));
        skb_set_inner_transport_header(skb, skb_transport_offset(skb));

        skb_set_network_header(skb, -x->props.header_len);
        skb->mac_header = skb->network_header +
                          offsetof(struct ipv6hdr, nexthdr);
        skb->transport_header = skb->network_header + sizeof(*top_iph);
        top_iph = ipv6_hdr(skb);

        top_iph->version = 6;

        memcpy(top_iph->flow_lbl, XFRM_MODE_SKB_CB(skb)->flow_lbl,
               sizeof(top_iph->flow_lbl));
        top_iph->nexthdr = xfrm_af2proto(skb_dst(skb)->ops->family);

        if (x->props.extra_flags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP)
                dsfield = 0;
        else
                dsfield = XFRM_MODE_SKB_CB(skb)->tos;
        dsfield = INET_ECN_encapsulate(dsfield, XFRM_MODE_SKB_CB(skb)->tos);
        if (x->props.flags & XFRM_STATE_NOECN)
                dsfield &= ~INET_ECN_MASK;
        ipv6_change_dsfield(top_iph, 0, dsfield);
        top_iph->hop_limit = ip6_dst_hoplimit(xfrm_dst_child(dst));
        top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
        top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
        return 0;
}

static int xfrm6_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ipv6hdr *top_iph;
        struct ip_beet_phdr *ph;
        int optlen, hdr_len;

        hdr_len = 0;
        optlen = XFRM_MODE_SKB_CB(skb)->optlen;
        if (unlikely(optlen))
                hdr_len += IPV4_BEET_PHMAXLEN - (optlen & 4);

        skb_set_network_header(skb, -x->props.header_len - hdr_len);
        if (x->sel.family != AF_INET6)
                skb->network_header += IPV4_BEET_PHMAXLEN;
        skb->mac_header = skb->network_header +
                          offsetof(struct ipv6hdr, nexthdr);
        skb->transport_header = skb->network_header + sizeof(*top_iph);
        ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdr_len);

        xfrm6_beet_make_header(skb);

        top_iph = ipv6_hdr(skb);
        if (unlikely(optlen)) {
                if (WARN_ON(optlen < 0))
                        return -EINVAL;

                ph->padlen = 4 - (optlen & 4);
                ph->hdrlen = optlen / 8;
                ph->nexthdr = top_iph->nexthdr;
                if (ph->padlen)
                        memset(ph + 1, IPOPT_NOP, ph->padlen);

                top_iph->nexthdr = IPPROTO_BEETPH;
        }

        top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
        top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
        return 0;
}
#endif

/* Add encapsulation header.
 *
 * On exit, the transport header will be set to the start of the
 * encapsulation header to be filled in by x->type->output and the mac
 * header will be set to the nextheader (protocol for IPv4) field of the
 * extension header directly preceding the encapsulation header, or in
 * its absence, that of the top IP header.
 * The value of the network header will always point to the top IP header
 * while skb->data will point to the payload.
 */
static int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int err;

        err = xfrm_inner_extract_output(x, skb);
        if (err)
                return err;

        IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
        skb->protocol = htons(ETH_P_IP);

        switch (x->props.mode) {
        case XFRM_MODE_BEET:
                return xfrm4_beet_encap_add(x, skb);
        case XFRM_MODE_TUNNEL:
                return xfrm4_tunnel_encap_add(x, skb);
        }

        WARN_ON_ONCE(1);
        return -EOPNOTSUPP;
}

static int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
        int err;

        err = xfrm_inner_extract_output(x, skb);
        if (err)
                return err;

        skb->ignore_df = 1;
        skb->protocol = htons(ETH_P_IPV6);

        switch (x->props.mode) {
        case XFRM_MODE_BEET:
                return xfrm6_beet_encap_add(x, skb);
        case XFRM_MODE_TUNNEL:
                return xfrm6_tunnel_encap_add(x, skb);
        default:
                WARN_ON_ONCE(1);
                return -EOPNOTSUPP;
        }
#endif
        WARN_ON_ONCE(1);
        return -EAFNOSUPPORT;
}

static int xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
{
        switch (x->props.mode) {
        case XFRM_MODE_BEET:
        case XFRM_MODE_TUNNEL:
                if (x->props.family == AF_INET)
                        return xfrm4_prepare_output(x, skb);
                if (x->props.family == AF_INET6)
                        return xfrm6_prepare_output(x, skb);
                break;
        case XFRM_MODE_TRANSPORT:
                if (x->props.family == AF_INET)
                        return xfrm4_transport_output(x, skb);
                if (x->props.family == AF_INET6)
                        return xfrm6_transport_output(x, skb);
                break;
        case XFRM_MODE_ROUTEOPTIMIZATION:
                if (x->props.family == AF_INET6)
                        return xfrm6_ro_output(x, skb);
                WARN_ON_ONCE(1);
                break;
        default:
                WARN_ON_ONCE(1);
                break;
        }

        return -EOPNOTSUPP;
}

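/* Dispatch summary of the switch above:
 *
 *      mode                    AF_INET                  AF_INET6
 *      BEET / TUNNEL           xfrm4_prepare_output     xfrm6_prepare_output
 *      TRANSPORT               xfrm4_transport_output   xfrm6_transport_output
 *      ROUTEOPTIMIZATION       (unsupported)            xfrm6_ro_output
 */
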
#if IS_ENABLED(CONFIG_NET_PKTGEN)
int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
{
        return xfrm_outer_mode_output(x, skb);
}
EXPORT_SYMBOL_GPL(pktgen_xfrm_outer_mode_output);
#endif

static int xfrm_output_one(struct sk_buff *skb, int err)
{
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_state *x = dst->xfrm;
        struct net *net = xs_net(x);

        if (err <= 0)
                goto resume;

        do {
                err = xfrm_skb_check_space(skb);
                if (err) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
                        goto error_nolock;
                }

                skb->mark = xfrm_smark_get(skb->mark, x);

                err = xfrm_outer_mode_output(x, skb);
                if (err) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
                        goto error_nolock;
                }

                spin_lock_bh(&x->lock);

                if (unlikely(x->km.state != XFRM_STATE_VALID)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
                        err = -EINVAL;
                        goto error;
                }

                err = xfrm_state_check_expire(x);
                if (err) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEEXPIRED);
                        goto error;
                }

                err = xfrm_replay_overflow(x, skb);
                if (err) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
                        goto error;
                }

                x->curlft.bytes += skb->len;
                x->curlft.packets++;

                spin_unlock_bh(&x->lock);

                skb_dst_force(skb);
                if (!skb_dst(skb)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
                        err = -EHOSTUNREACH;
                        goto error_nolock;
                }

                if (xfrm_offload(skb)) {
                        x->type_offload->encap(x, skb);
                } else {
                        /* Inner headers are invalid now. */
                        skb->encapsulation = 0;

                        err = x->type->output(x, skb);
                        if (err == -EINPROGRESS)
                                goto out;
                }

resume:
                if (err) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        goto error_nolock;
                }

                dst = skb_dst_pop(skb);
                if (!dst) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
                        err = -EHOSTUNREACH;
                        goto error_nolock;
                }
                skb_dst_set(skb, dst);
                x = dst->xfrm;
        } while (x && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL));

        return 0;

error:
        spin_unlock_bh(&x->lock);
error_nolock:
        kfree_skb(skb);
out:
        return err;
}

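/* Bundle walk, illustrated (hypothetical SA stack): for transport-mode
 * IPComp stacked on tunnel-mode ESP, the do/while loop above runs once
 * for the IPComp state, pops the dst to reach the ESP state, and then
 * exits because that state's outer mode has XFRM_MODE_FLAG_TUNNEL set;
 * xfrm_output_resume() below then re-runs local_out and the POST_ROUTING
 * hook on the new packet before the ESP state is handled by the next
 * xfrm_output_one() pass.
 */
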
int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err)
{
        struct net *net = xs_net(skb_dst(skb)->xfrm);

        while (likely((err = xfrm_output_one(skb, err)) == 0)) {
                nf_reset_ct(skb);

                err = skb_dst(skb)->ops->local_out(net, sk, skb);
                if (unlikely(err != 1))
                        goto out;

                if (!skb_dst(skb)->xfrm)
                        return dst_output(net, sk, skb);

                err = nf_hook(skb_dst(skb)->ops->family,
                              NF_INET_POST_ROUTING, net, sk, skb,
                              NULL, skb_dst(skb)->dev, xfrm_output2);
                if (unlikely(err != 1))
                        goto out;
        }

        if (err == -EINPROGRESS)
                err = 0;

out:
        return err;
}
EXPORT_SYMBOL_GPL(xfrm_output_resume);

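/* Usage sketch: an asynchronous transform returns -EINPROGRESS from
 * x->type->output and later finishes the packet from its crypto
 * completion callback by re-entering xfrm_output_resume().  A minimal
 * sketch of such a callback (hypothetical, modeled on the ESP code):
 *
 *      static void my_esp_done(void *data, int err)
 *      {
 *              struct sk_buff *skb = data;
 *
 *              (finalize the ESP header/trailer for this skb)
 *              xfrm_output_resume(skb->sk, skb, err);
 *      }
 */
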
static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        return xfrm_output_resume(sk, skb, 1);
}

static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct sk_buff *segs, *nskb;

        BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
        BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_GSO_CB_OFFSET);
        segs = skb_gso_segment(skb, 0);
        kfree_skb(skb);
        if (IS_ERR(segs))
                return PTR_ERR(segs);
        if (segs == NULL)
                return -EINVAL;

        skb_list_walk_safe(segs, segs, nskb) {
                int err;

                skb_mark_not_on_list(segs);
                err = xfrm_output2(net, sk, segs);

                if (unlikely(err)) {
                        kfree_skb_list(nskb);
                        return err;
                }
        }

        return 0;
}

/* For partial checksum offload, the outer header checksum is calculated
 * by software and the inner header checksum is calculated by hardware.
 * This requires hardware to know the inner packet type to calculate
 * the inner header checksum.  Save the inner ip protocol here to avoid
 * traversing the packet in the vendor's xmit code.
 * For IPsec tunnel mode, save the ip protocol from the IP header of the
 * plain text packet.  Otherwise, if the encap type is IPIP, just save
 * skb->inner_ipproto; in any other case, get the ip protocol from the
 * inner IP header.
 */
static void xfrm_get_inner_ipproto(struct sk_buff *skb, struct xfrm_state *x)
{
        struct xfrm_offload *xo = xfrm_offload(skb);
        const struct ethhdr *eth;

        if (!xo)
                return;

        if (x->outer_mode.encap == XFRM_MODE_TUNNEL) {
                switch (x->outer_mode.family) {
                case AF_INET:
                        xo->inner_ipproto = ip_hdr(skb)->protocol;
                        break;
                case AF_INET6:
                        xo->inner_ipproto = ipv6_hdr(skb)->nexthdr;
                        break;
                default:
                        break;
                }

                return;
        }

        /* non-Tunnel Mode */
        if (!skb->encapsulation)
                return;

        if (skb->inner_protocol_type == ENCAP_TYPE_IPPROTO) {
                xo->inner_ipproto = skb->inner_ipproto;
                return;
        }

        if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
                return;

        eth = (struct ethhdr *)skb_inner_mac_header(skb);

        switch (ntohs(eth->h_proto)) {
        case ETH_P_IPV6:
                xo->inner_ipproto = inner_ipv6_hdr(skb)->nexthdr;
                break;
        case ETH_P_IP:
                xo->inner_ipproto = inner_ip_hdr(skb)->protocol;
                break;
        }
}

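/* Example (illustrative): for tunnel-mode ESP carrying a TCP/IPv4 packet,
 * the network header still points at the inner (plain text) IP header
 * when this runs, so xo->inner_ipproto becomes IPPROTO_TCP and a device
 * doing partial checksum offload can locate the inner TCP checksum
 * without re-parsing the packet.
 */
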
int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
        struct net *net = dev_net(skb_dst(skb)->dev);
        struct xfrm_state *x = skb_dst(skb)->xfrm;
        int err;

        switch (x->outer_mode.family) {
        case AF_INET:
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
                IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
                break;
        case AF_INET6:
                memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));

                IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
                break;
        }

        if (xfrm_dev_offload_ok(skb, x)) {
                struct sec_path *sp;

                sp = secpath_set(skb);
                if (!sp) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
                        kfree_skb(skb);
                        return -ENOMEM;
                }

                sp->olen++;
                sp->xvec[sp->len++] = x;
                xfrm_state_hold(x);

                xfrm_get_inner_ipproto(skb, x);
                skb->encapsulation = 1;

                if (skb_is_gso(skb)) {
                        if (skb->inner_protocol)
                                return xfrm_output_gso(net, sk, skb);

                        skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
                        goto out;
                }

                if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
                        goto out;
        } else {
                if (skb_is_gso(skb))
                        return xfrm_output_gso(net, sk, skb);
        }

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                err = skb_checksum_help(skb);
                if (err) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
                        kfree_skb(skb);
                        return err;
                }
        }

out:
        return xfrm_output2(net, sk, skb);
}
EXPORT_SYMBOL_GPL(xfrm_output);

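/* Call-path sketch (IPv4 case, assuming the usual flow through
 * net/ipv4/xfrm4_output.c):
 *
 *      ip_local_out()
 *        -> dst_output()          skb_dst() is an xfrm bundle
 *          -> xfrm4_output()      POST_ROUTING hook wrapper
 *            -> xfrm_output()     this function
 *              -> xfrm_output2() -> xfrm_output_resume()
 *                -> xfrm_output_one()  per-state transforms
 */
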
static int xfrm4_tunnel_check_size(struct sk_buff *skb)
{
        int mtu, ret = 0;

        if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
                goto out;

        if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df)
                goto out;

        mtu = dst_mtu(skb_dst(skb));
        if ((!skb_is_gso(skb) && skb->len > mtu) ||
            (skb_is_gso(skb) &&
             !skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) {
                skb->protocol = htons(ETH_P_IP);

                if (skb->sk)
                        xfrm_local_error(skb, mtu);
                else
                        icmp_send(skb, ICMP_DEST_UNREACH,
                                  ICMP_FRAG_NEEDED, htonl(mtu));
                ret = -EMSGSIZE;
        }
out:
        return ret;
}

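/* Worked example (hypothetical numbers): with a 1500 byte link MTU and
 * roughly 73 bytes of tunnel-mode ESP overhead, dst_mtu() on the xfrm
 * route is about 1427.  A 1500 byte DF packet then fails the check above:
 * a socket-owned skb gets a local error via xfrm_local_error(), otherwise
 * ICMP_FRAG_NEEDED carrying mtu = 1427 is sent, so the sender's path MTU
 * discovery converges on the tunnel MTU.
 */
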
static int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int err;

        if (x->outer_mode.encap == XFRM_MODE_BEET &&
            ip_is_fragment(ip_hdr(skb))) {
                net_warn_ratelimited("BEET mode doesn't support inner IPv4 fragments\n");
                return -EAFNOSUPPORT;
        }

        err = xfrm4_tunnel_check_size(skb);
        if (err)
                return err;

        XFRM_MODE_SKB_CB(skb)->protocol = ip_hdr(skb)->protocol;

        xfrm4_extract_header(skb);
        return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
static int xfrm6_tunnel_check_size(struct sk_buff *skb)
{
        int mtu, ret = 0;
        struct dst_entry *dst = skb_dst(skb);

        if (skb->ignore_df)
                goto out;

        mtu = dst_mtu(dst);
        if (mtu < IPV6_MIN_MTU)
                mtu = IPV6_MIN_MTU;

        if ((!skb_is_gso(skb) && skb->len > mtu) ||
            (skb_is_gso(skb) &&
             !skb_gso_validate_network_len(skb, ip6_skb_dst_mtu(skb)))) {
                skb->dev = dst->dev;
                skb->protocol = htons(ETH_P_IPV6);

                if (xfrm6_local_dontfrag(skb->sk))
                        ipv6_stub->xfrm6_local_rxpmtu(skb, mtu);
                else if (skb->sk)
                        xfrm_local_error(skb, mtu);
                else
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                ret = -EMSGSIZE;
        }
out:
        return ret;
}
#endif

static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
        int err;

        err = xfrm6_tunnel_check_size(skb);
        if (err)
                return err;

        XFRM_MODE_SKB_CB(skb)->protocol = ipv6_hdr(skb)->nexthdr;

        xfrm6_extract_header(skb);
        return 0;
#else
        WARN_ON_ONCE(1);
        return -EAFNOSUPPORT;
#endif
}

static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
        switch (skb->protocol) {
        case htons(ETH_P_IP):
                return xfrm4_extract_output(x, skb);
        case htons(ETH_P_IPV6):
                return xfrm6_extract_output(x, skb);
        }

        return -EAFNOSUPPORT;
}

void xfrm_local_error(struct sk_buff *skb, int mtu)
{
        unsigned int proto;
        struct xfrm_state_afinfo *afinfo;

        if (skb->protocol == htons(ETH_P_IP))
                proto = AF_INET;
        else if (skb->protocol == htons(ETH_P_IPV6) &&
                 skb->sk->sk_family == AF_INET6)
                proto = AF_INET6;
        else
                return;

        afinfo = xfrm_state_get_afinfo(proto);
        if (afinfo) {
                afinfo->local_error(skb, mtu);
                rcu_read_unlock();
        }
}
EXPORT_SYMBOL_GPL(xfrm_local_error);