/*
 * xfrm_state.c
 *
 * Changes:
 *      Mitsuru KANDA @USAGI
 *      Kazunori MIYAZAWA @USAGI
 *      Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *              IPv6 support
 *      YOSHIFUJI Hideaki @USAGI
 *              Split up af-specific functions
 *      Derek Atkins <derek@ihtfp.com>
 *              Add UDP Encapsulation
 *
 */

#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <linux/uaccess.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>

#include "xfrm_hash.h"

#define xfrm_state_deref_prot(table, net) \
        rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))

static void xfrm_state_gc_task(struct work_struct *work);

/* Each xfrm_state may be linked to three hash tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
   3. Hash table by (daddr,saddr,family) to find SAs by addresses when no
      SPI is known. (ctl)
 */

static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static struct kmem_cache *xfrm_state_cache __ro_after_init;

static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
static HLIST_HEAD(xfrm_state_gc_list);

static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
{
        return refcount_inc_not_zero(&x->refcnt);
}

static inline unsigned int xfrm_dst_hash(struct net *net,
                                         const xfrm_address_t *daddr,
                                         const xfrm_address_t *saddr,
                                         u32 reqid,
                                         unsigned short family)
{
        return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
}

static inline unsigned int xfrm_src_hash(struct net *net,
                                         const xfrm_address_t *daddr,
                                         const xfrm_address_t *saddr,
                                         unsigned short family)
{
        return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
}

static inline unsigned int
xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
              __be32 spi, u8 proto, unsigned short family)
{
        return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
}

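/* Rehash every state in one old bucket into the new bydst, bysrc and
 * byspi tables sized by nhashmask; states without an SPI are only
 * linked into the first two tables.
 */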
static void xfrm_hash_transfer(struct hlist_head *list,
                               struct hlist_head *ndsttable,
                               struct hlist_head *nsrctable,
                               struct hlist_head *nspitable,
                               unsigned int nhashmask)
{
        struct hlist_node *tmp;
        struct xfrm_state *x;

        hlist_for_each_entry_safe(x, tmp, list, bydst) {
                unsigned int h;

                h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
                                    x->props.reqid, x->props.family,
                                    nhashmask);
                hlist_add_head_rcu(&x->bydst, ndsttable + h);

                h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
                                    x->props.family,
                                    nhashmask);
                hlist_add_head_rcu(&x->bysrc, nsrctable + h);

                if (x->id.spi) {
                        h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
                                            x->id.proto, x->props.family,
                                            nhashmask);
                        hlist_add_head_rcu(&x->byspi, nspitable + h);
                }
        }
}

static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
{
        return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
}

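/* Worker that doubles the per-netns state hash tables.  The new tables
 * are allocated up front, all entries are transferred under
 * xfrm_state_lock while the hash-generation seqcount is bumped, and the
 * old tables are freed only after an RCU grace period, since lookups
 * walk them under rcu_read_lock().
 */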
static void xfrm_hash_resize(struct work_struct *work)
{
        struct net *net = container_of(work, struct net, xfrm.state_hash_work);
        struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
        unsigned long nsize, osize;
        unsigned int nhashmask, ohashmask;
        int i;

        nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
        ndst = xfrm_hash_alloc(nsize);
        if (!ndst)
                return;
        nsrc = xfrm_hash_alloc(nsize);
        if (!nsrc) {
                xfrm_hash_free(ndst, nsize);
                return;
        }
        nspi = xfrm_hash_alloc(nsize);
        if (!nspi) {
                xfrm_hash_free(ndst, nsize);
                xfrm_hash_free(nsrc, nsize);
                return;
        }

        spin_lock_bh(&net->xfrm.xfrm_state_lock);
        write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);

        nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
        odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
        for (i = net->xfrm.state_hmask; i >= 0; i--)
                xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nhashmask);

        osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
        ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
        ohashmask = net->xfrm.state_hmask;

        rcu_assign_pointer(net->xfrm.state_bydst, ndst);
        rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
        rcu_assign_pointer(net->xfrm.state_byspi, nspi);
        net->xfrm.state_hmask = nhashmask;

        write_seqcount_end(&net->xfrm.xfrm_state_hash_generation);
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);

        osize = (ohashmask + 1) * sizeof(struct hlist_head);

        synchronize_rcu();

        xfrm_hash_free(odst, osize);
        xfrm_hash_free(osrc, osize);
        xfrm_hash_free(ospi, osize);
}

static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];

static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
bool km_is_alive(const struct km_event *c);
void km_state_expired(struct xfrm_state *x, int hard, u32 portid);

static DEFINE_SPINLOCK(xfrm_type_lock);
int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
{
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        const struct xfrm_type **typemap;
        int err = 0;

        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;
        typemap = afinfo->type_map;
        spin_lock_bh(&xfrm_type_lock);

        if (likely(typemap[type->proto] == NULL))
                typemap[type->proto] = type;
        else
                err = -EEXIST;
        spin_unlock_bh(&xfrm_type_lock);
        rcu_read_unlock();
        return err;
}
EXPORT_SYMBOL(xfrm_register_type);

int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
{
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        const struct xfrm_type **typemap;
        int err = 0;

        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;
        typemap = afinfo->type_map;
        spin_lock_bh(&xfrm_type_lock);

        if (unlikely(typemap[type->proto] != type))
                err = -ENOENT;
        else
                typemap[type->proto] = NULL;
        spin_unlock_bh(&xfrm_type_lock);
        rcu_read_unlock();
        return err;
}
EXPORT_SYMBOL(xfrm_unregister_type);

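/* Look up the xfrm_type registered for @proto in @family and take a
 * reference on its module.  On a miss, "xfrm-type-<family>-<proto>" is
 * requested via request_module() once before retrying.
 */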
static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
        struct xfrm_state_afinfo *afinfo;
        const struct xfrm_type **typemap;
        const struct xfrm_type *type;
        int modload_attempted = 0;

retry:
        afinfo = xfrm_state_get_afinfo(family);
        if (unlikely(afinfo == NULL))
                return NULL;
        typemap = afinfo->type_map;

        type = READ_ONCE(typemap[proto]);
        if (unlikely(type && !try_module_get(type->owner)))
                type = NULL;

        rcu_read_unlock();

        if (!type && !modload_attempted) {
                request_module("xfrm-type-%d-%d", family, proto);
                modload_attempted = 1;
                goto retry;
        }

        return type;
}

static void xfrm_put_type(const struct xfrm_type *type)
{
        module_put(type->owner);
}

static DEFINE_SPINLOCK(xfrm_type_offload_lock);
int xfrm_register_type_offload(const struct xfrm_type_offload *type,
                               unsigned short family)
{
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        const struct xfrm_type_offload **typemap;
        int err = 0;

        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;
        typemap = afinfo->type_offload_map;
        spin_lock_bh(&xfrm_type_offload_lock);

        if (likely(typemap[type->proto] == NULL))
                typemap[type->proto] = type;
        else
                err = -EEXIST;
        spin_unlock_bh(&xfrm_type_offload_lock);
        rcu_read_unlock();
        return err;
}
EXPORT_SYMBOL(xfrm_register_type_offload);

int xfrm_unregister_type_offload(const struct xfrm_type_offload *type,
                                 unsigned short family)
{
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        const struct xfrm_type_offload **typemap;
        int err = 0;

        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;
        typemap = afinfo->type_offload_map;
        spin_lock_bh(&xfrm_type_offload_lock);

        if (unlikely(typemap[type->proto] != type))
                err = -ENOENT;
        else
                typemap[type->proto] = NULL;
        spin_unlock_bh(&xfrm_type_offload_lock);
        rcu_read_unlock();
        return err;
}
EXPORT_SYMBOL(xfrm_unregister_type_offload);

static const struct xfrm_type_offload *
xfrm_get_type_offload(u8 proto, unsigned short family, bool try_load)
{
        struct xfrm_state_afinfo *afinfo;
        const struct xfrm_type_offload **typemap;
        const struct xfrm_type_offload *type;

retry:
        afinfo = xfrm_state_get_afinfo(family);
        if (unlikely(afinfo == NULL))
                return NULL;
        typemap = afinfo->type_offload_map;

        type = typemap[proto];
        if ((type && !try_module_get(type->owner)))
                type = NULL;

        rcu_read_unlock();

        if (!type && try_load) {
                request_module("xfrm-offload-%d-%d", family, proto);
                try_load = false;
                goto retry;
        }

        return type;
}

static void xfrm_put_type_offload(const struct xfrm_type_offload *type)
{
        module_put(type->owner);
}

static DEFINE_SPINLOCK(xfrm_mode_lock);
int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
        struct xfrm_state_afinfo *afinfo;
        struct xfrm_mode **modemap;
        int err;

        if (unlikely(mode->encap >= XFRM_MODE_MAX))
                return -EINVAL;

        afinfo = xfrm_state_get_afinfo(family);
        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;

        err = -EEXIST;
        modemap = afinfo->mode_map;
        spin_lock_bh(&xfrm_mode_lock);
        if (modemap[mode->encap])
                goto out;

        err = -ENOENT;
        if (!try_module_get(afinfo->owner))
                goto out;

        mode->afinfo = afinfo;
        modemap[mode->encap] = mode;
        err = 0;

out:
        spin_unlock_bh(&xfrm_mode_lock);
        rcu_read_unlock();
        return err;
}
EXPORT_SYMBOL(xfrm_register_mode);

int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
{
        struct xfrm_state_afinfo *afinfo;
        struct xfrm_mode **modemap;
        int err;

        if (unlikely(mode->encap >= XFRM_MODE_MAX))
                return -EINVAL;

        afinfo = xfrm_state_get_afinfo(family);
        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;

        err = -ENOENT;
        modemap = afinfo->mode_map;
        spin_lock_bh(&xfrm_mode_lock);
        if (likely(modemap[mode->encap] == mode)) {
                modemap[mode->encap] = NULL;
                module_put(mode->afinfo->owner);
                err = 0;
        }

        spin_unlock_bh(&xfrm_mode_lock);
        rcu_read_unlock();
        return err;
}
EXPORT_SYMBOL(xfrm_unregister_mode);

static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
        struct xfrm_state_afinfo *afinfo;
        struct xfrm_mode *mode;
        int modload_attempted = 0;

        if (unlikely(encap >= XFRM_MODE_MAX))
                return NULL;

retry:
        afinfo = xfrm_state_get_afinfo(family);
        if (unlikely(afinfo == NULL))
                return NULL;

        mode = READ_ONCE(afinfo->mode_map[encap]);
        if (unlikely(mode && !try_module_get(mode->owner)))
                mode = NULL;

        rcu_read_unlock();
        if (!mode && !modload_attempted) {
                request_module("xfrm-mode-%d-%d", family, encap);
                modload_attempted = 1;
                goto retry;
        }

        return mode;
}

static void xfrm_put_mode(struct xfrm_mode *mode)
{
        module_put(mode->owner);
}

void xfrm_state_free(struct xfrm_state *x)
{
        kmem_cache_free(xfrm_state_cache, x);
}
EXPORT_SYMBOL(xfrm_state_free);

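/* Final teardown of a state: stop its timers and release everything
 * hanging off it (algorithms, encapsulation, modes, type, offload and
 * security state) before returning it to the slab cache.
 */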
static void ___xfrm_state_destroy(struct xfrm_state *x)
{
        tasklet_hrtimer_cancel(&x->mtimer);
        del_timer_sync(&x->rtimer);
        kfree(x->aead);
        kfree(x->aalg);
        kfree(x->ealg);
        kfree(x->calg);
        kfree(x->encap);
        kfree(x->coaddr);
        kfree(x->replay_esn);
        kfree(x->preplay_esn);
        if (x->inner_mode)
                xfrm_put_mode(x->inner_mode);
        if (x->inner_mode_iaf)
                xfrm_put_mode(x->inner_mode_iaf);
        if (x->outer_mode)
                xfrm_put_mode(x->outer_mode);
        if (x->type_offload)
                xfrm_put_type_offload(x->type_offload);
        if (x->type) {
                x->type->destructor(x);
                xfrm_put_type(x->type);
        }
        if (x->xfrag.page)
                put_page(x->xfrag.page);
        xfrm_dev_state_free(x);
        security_xfrm_state_free(x);
        xfrm_state_free(x);
}

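/* Garbage-collection worker: detach the pending list under the GC lock,
 * wait for an RCU grace period so concurrent lookups have finished with
 * the entries, then destroy them.
 */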
static void xfrm_state_gc_task(struct work_struct *work)
{
        struct xfrm_state *x;
        struct hlist_node *tmp;
        struct hlist_head gc_list;

        spin_lock_bh(&xfrm_state_gc_lock);
        hlist_move_list(&xfrm_state_gc_list, &gc_list);
        spin_unlock_bh(&xfrm_state_gc_lock);

        synchronize_rcu();

        hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
                ___xfrm_state_destroy(x);
}

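/* Per-state lifetime timer: checks the hard/soft add and use expiries,
 * notifies the key manager on soft expiry, re-arms itself for the next
 * deadline and deletes the state once a hard limit has been reached.
 */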
static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
{
        struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer);
        struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer);
        time64_t now = ktime_get_real_seconds();
        time64_t next = TIME64_MAX;
        int warn = 0;
        int err = 0;

        spin_lock(&x->lock);
        if (x->km.state == XFRM_STATE_DEAD)
                goto out;
        if (x->km.state == XFRM_STATE_EXPIRED)
                goto expired;
        if (x->lft.hard_add_expires_seconds) {
                long tmo = x->lft.hard_add_expires_seconds +
                        x->curlft.add_time - now;
                if (tmo <= 0) {
                        if (x->xflags & XFRM_SOFT_EXPIRE) {
                                /* Hard expiry reached without a prior soft expiry?!
                                 * Setting a new date could trigger this.
                                 * Workaround: fix up x->curlft.add_time as done below.
                                 */
                                x->curlft.add_time = now - x->saved_tmo - 1;
                                tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
                        } else
                                goto expired;
                }
                if (tmo < next)
                        next = tmo;
        }
        if (x->lft.hard_use_expires_seconds) {
                long tmo = x->lft.hard_use_expires_seconds +
                        (x->curlft.use_time ? : now) - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (x->km.dying)
                goto resched;
        if (x->lft.soft_add_expires_seconds) {
                long tmo = x->lft.soft_add_expires_seconds +
                        x->curlft.add_time - now;
                if (tmo <= 0) {
                        warn = 1;
                        x->xflags &= ~XFRM_SOFT_EXPIRE;
                } else if (tmo < next) {
                        next = tmo;
                        x->xflags |= XFRM_SOFT_EXPIRE;
                        x->saved_tmo = tmo;
                }
        }
        if (x->lft.soft_use_expires_seconds) {
                long tmo = x->lft.soft_use_expires_seconds +
                        (x->curlft.use_time ? : now) - now;
                if (tmo <= 0)
                        warn = 1;
                else if (tmo < next)
                        next = tmo;
        }

        x->km.dying = warn;
        if (warn)
                km_state_expired(x, 0, 0);
resched:
        if (next != TIME64_MAX) {
                tasklet_hrtimer_start(&x->mtimer, ktime_set(next, 0), HRTIMER_MODE_REL);
        }

        goto out;

expired:
        if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0)
                x->km.state = XFRM_STATE_EXPIRED;

        err = __xfrm_state_delete(x);
        if (!err)
                km_state_expired(x, 1, 0);

        xfrm_audit_state_delete(x, err ? 0 : 1, true);

out:
        spin_unlock(&x->lock);
        return HRTIMER_NORESTART;
}

static void xfrm_replay_timer_handler(struct timer_list *t);

struct xfrm_state *xfrm_state_alloc(struct net *net)
{
        struct xfrm_state *x;

        x = kmem_cache_alloc(xfrm_state_cache, GFP_ATOMIC | __GFP_ZERO);

        if (x) {
                write_pnet(&x->xs_net, net);
                refcount_set(&x->refcnt, 1);
                atomic_set(&x->tunnel_users, 0);
                INIT_LIST_HEAD(&x->km.all);
                INIT_HLIST_NODE(&x->bydst);
                INIT_HLIST_NODE(&x->bysrc);
                INIT_HLIST_NODE(&x->byspi);
                tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler,
                                        CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
                timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
                x->curlft.add_time = ktime_get_real_seconds();
                x->lft.soft_byte_limit = XFRM_INF;
                x->lft.soft_packet_limit = XFRM_INF;
                x->lft.hard_byte_limit = XFRM_INF;
                x->lft.hard_packet_limit = XFRM_INF;
                x->replay_maxage = 0;
                x->replay_maxdiff = 0;
                x->inner_mode = NULL;
                x->inner_mode_iaf = NULL;
                spin_lock_init(&x->lock);
        }
        return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);

void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
{
        WARN_ON(x->km.state != XFRM_STATE_DEAD);

        if (sync) {
                synchronize_rcu();
                ___xfrm_state_destroy(x);
        } else {
                spin_lock_bh(&xfrm_state_gc_lock);
                hlist_add_head(&x->gclist, &xfrm_state_gc_list);
                spin_unlock_bh(&xfrm_state_gc_lock);
                schedule_work(&xfrm_state_gc_work);
        }
}
EXPORT_SYMBOL(__xfrm_state_destroy);

int __xfrm_state_delete(struct xfrm_state *x)
{
        struct net *net = xs_net(x);
        int err = -ESRCH;

        if (x->km.state != XFRM_STATE_DEAD) {
                x->km.state = XFRM_STATE_DEAD;
                spin_lock(&net->xfrm.xfrm_state_lock);
                list_del(&x->km.all);
                hlist_del_rcu(&x->bydst);
                hlist_del_rcu(&x->bysrc);
                if (x->id.spi)
                        hlist_del_rcu(&x->byspi);
                net->xfrm.state_num--;
                spin_unlock(&net->xfrm.xfrm_state_lock);

                xfrm_dev_state_delete(x);

                /* All xfrm_state objects are created by xfrm_state_alloc.
                 * The xfrm_state_alloc call gives a reference, and that
                 * is what we are dropping here.
                 */
                xfrm_state_put(x);
                err = 0;
        }

        return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);

int xfrm_state_delete(struct xfrm_state *x)
{
        int err;

        spin_lock_bh(&x->lock);
        err = __xfrm_state_delete(x);
        spin_unlock_bh(&x->lock);

        return err;
}
EXPORT_SYMBOL(xfrm_state_delete);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
{
        int i, err = 0;

        for (i = 0; i <= net->xfrm.state_hmask; i++) {
                struct xfrm_state *x;

                hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
                        if (xfrm_id_proto_match(x->id.proto, proto) &&
                           (err = security_xfrm_state_delete(x)) != 0) {
                                xfrm_audit_state_delete(x, 0, task_valid);
                                return err;
                        }
                }
        }

        return err;
}

static inline int
xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
{
        int i, err = 0;

        for (i = 0; i <= net->xfrm.state_hmask; i++) {
                struct xfrm_state *x;
                struct xfrm_state_offload *xso;

                hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
                        xso = &x->xso;

                        if (xso->dev == dev &&
                           (err = security_xfrm_state_delete(x)) != 0) {
                                xfrm_audit_state_delete(x, 0, task_valid);
                                return err;
                        }
                }
        }

        return err;
}
#else
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
{
        return 0;
}

static inline int
xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
{
        return 0;
}
#endif

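/* Delete every non-kernel-owned state whose protocol matches @proto,
 * emitting an audit record per deletion.  Returns 0 once at least one
 * state has been removed, otherwise the last error (-ESRCH if nothing
 * matched).
 */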
int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
{
        int i, err = 0, cnt = 0;

        spin_lock_bh(&net->xfrm.xfrm_state_lock);
        err = xfrm_state_flush_secctx_check(net, proto, task_valid);
        if (err)
                goto out;

        err = -ESRCH;
        for (i = 0; i <= net->xfrm.state_hmask; i++) {
                struct xfrm_state *x;
restart:
                hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
                        if (!xfrm_state_kern(x) &&
                            xfrm_id_proto_match(x->id.proto, proto)) {
                                xfrm_state_hold(x);
                                spin_unlock_bh(&net->xfrm.xfrm_state_lock);

                                err = xfrm_state_delete(x);
                                xfrm_audit_state_delete(x, err ? 0 : 1,
                                                        task_valid);
                                if (sync)
                                        xfrm_state_put_sync(x);
                                else
                                        xfrm_state_put(x);
                                if (!err)
                                        cnt++;

                                spin_lock_bh(&net->xfrm.xfrm_state_lock);
                                goto restart;
                        }
                }
        }
out:
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
        if (cnt)
                err = 0;

        return err;
}
EXPORT_SYMBOL(xfrm_state_flush);

int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid)
{
        int i, err = 0, cnt = 0;

        spin_lock_bh(&net->xfrm.xfrm_state_lock);
        err = xfrm_dev_state_flush_secctx_check(net, dev, task_valid);
        if (err)
                goto out;

        err = -ESRCH;
        for (i = 0; i <= net->xfrm.state_hmask; i++) {
                struct xfrm_state *x;
                struct xfrm_state_offload *xso;
restart:
                hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
                        xso = &x->xso;

                        if (!xfrm_state_kern(x) && xso->dev == dev) {
                                xfrm_state_hold(x);
                                spin_unlock_bh(&net->xfrm.xfrm_state_lock);

                                err = xfrm_state_delete(x);
                                xfrm_audit_state_delete(x, err ? 0 : 1,
                                                        task_valid);
                                xfrm_state_put(x);
                                if (!err)
                                        cnt++;

                                spin_lock_bh(&net->xfrm.xfrm_state_lock);
                                goto restart;
                        }
                }
        }
        if (cnt)
                err = 0;

out:
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_dev_state_flush);

void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
{
        spin_lock_bh(&net->xfrm.xfrm_state_lock);
        si->sadcnt = net->xfrm.state_num;
        si->sadhcnt = net->xfrm.state_hmask + 1;
        si->sadhmcnt = xfrm_state_hashmax;
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);

static void
xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
                    const struct xfrm_tmpl *tmpl,
                    const xfrm_address_t *daddr, const xfrm_address_t *saddr,
                    unsigned short family)
{
        struct xfrm_state_afinfo *afinfo = xfrm_state_afinfo_get_rcu(family);

        if (!afinfo)
                return;

        afinfo->init_tempsel(&x->sel, fl);

        if (family != tmpl->encap_family) {
                afinfo = xfrm_state_afinfo_get_rcu(tmpl->encap_family);
                if (!afinfo)
                        return;
        }
        afinfo->init_temprop(x, tmpl, daddr, saddr);
}

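/* RCU lookup in the byspi hash: match family, SPI, protocol, destination
 * address and mark, and return the state only if its refcount could
 * still be taken.
 */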
static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
                                              const xfrm_address_t *daddr,
                                              __be32 spi, u8 proto,
                                              unsigned short family)
{
        unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
        struct xfrm_state *x;

        hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
                if (x->props.family != family ||
                    x->id.spi       != spi ||
                    x->id.proto     != proto ||
                    !xfrm_addr_equal(&x->id.daddr, daddr, family))
                        continue;

                if ((mark & x->mark.m) != x->mark.v)
                        continue;
                if (!xfrm_state_hold_rcu(x))
                        continue;
                return x;
        }

        return NULL;
}

static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
                                                     const xfrm_address_t *daddr,
                                                     const xfrm_address_t *saddr,
                                                     u8 proto, unsigned short family)
{
        unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
        struct xfrm_state *x;

        hlist_for_each_entry_rcu(x, net->xfrm.state_bysrc + h, bysrc) {
                if (x->props.family != family ||
                    x->id.proto     != proto ||
                    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
                    !xfrm_addr_equal(&x->props.saddr, saddr, family))
                        continue;

                if ((mark & x->mark.m) != x->mark.v)
                        continue;
                if (!xfrm_state_hold_rcu(x))
                        continue;
                return x;
        }

        return NULL;
}

static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
        struct net *net = xs_net(x);
        u32 mark = x->mark.v & x->mark.m;

        if (use_spi)
                return __xfrm_state_lookup(net, mark, &x->id.daddr,
                                           x->id.spi, x->id.proto, family);
        else
                return __xfrm_state_lookup_byaddr(net, mark,
                                                  &x->id.daddr,
                                                  &x->props.saddr,
                                                  x->id.proto, family);
}

static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
{
        if (have_hash_collision &&
            (net->xfrm.state_hmask + 1) < xfrm_state_hashmax &&
            net->xfrm.state_num > net->xfrm.state_hmask)
                schedule_work(&net->xfrm.state_hash_work);
}

static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
                               const struct flowi *fl, unsigned short family,
                               struct xfrm_state **best, int *acq_in_progress,
                               int *error)
{
        /* Resolution logic:
         * 1. There is a valid state with a matching selector. Done.
         * 2. There is a valid state with an inappropriate selector. Skip.
         *
         * Entering area of "sysdeps".
         *
         * 3. If the state is not valid, its selector is temporary and
         *    matches only the session which triggered the previous
         *    resolution.  The key manager will do something to install
         *    a state with a proper selector.
         */
        if (x->km.state == XFRM_STATE_VALID) {
                if ((x->sel.family &&
                     (x->sel.family != family ||
                      !xfrm_selector_match(&x->sel, fl, family))) ||
                    !security_xfrm_state_pol_flow_match(x, pol, fl))
                        return;

                if (!*best ||
                    (*best)->km.dying > x->km.dying ||
                    ((*best)->km.dying == x->km.dying &&
                     (*best)->curlft.add_time < x->curlft.add_time))
                        *best = x;
        } else if (x->km.state == XFRM_STATE_ACQ) {
                *acq_in_progress = 1;
        } else if (x->km.state == XFRM_STATE_ERROR ||
                   x->km.state == XFRM_STATE_EXPIRED) {
                if ((!x->sel.family ||
                     (x->sel.family == family &&
                      xfrm_selector_match(&x->sel, fl, family))) &&
                    security_xfrm_state_pol_flow_match(x, pol, fl))
                        *error = -ESRCH;
        }
}

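/* Resolve a state for output against template @tmpl and policy @pol.
 * The bydst hash is scanned first with the real source address and then
 * with a wildcard source; if nothing usable is found and no acquire is
 * in progress, a temporary XFRM_STATE_ACQ entry is created and the key
 * managers are queried.  The hash-generation seqcount is rechecked at
 * the end, so a concurrent resize makes the lookup fail with -EAGAIN.
 */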
struct xfrm_state *
xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
                const struct flowi *fl, struct xfrm_tmpl *tmpl,
                struct xfrm_policy *pol, int *err,
                unsigned short family, u32 if_id)
{
        static xfrm_address_t saddr_wildcard = { };
        struct net *net = xp_net(pol);
        unsigned int h, h_wildcard;
        struct xfrm_state *x, *x0, *to_put;
        int acquire_in_progress = 0;
        int error = 0;
        struct xfrm_state *best = NULL;
        u32 mark = pol->mark.v & pol->mark.m;
        unsigned short encap_family = tmpl->encap_family;
        unsigned int sequence;
        struct km_event c;

        to_put = NULL;

        sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);

        rcu_read_lock();
        h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
        hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) {
                if (x->props.family == encap_family &&
                    x->props.reqid == tmpl->reqid &&
                    (mark & x->mark.m) == x->mark.v &&
                    x->if_id == if_id &&
                    !(x->props.flags & XFRM_STATE_WILDRECV) &&
                    xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
                    tmpl->mode == x->props.mode &&
                    tmpl->id.proto == x->id.proto &&
                    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
                        xfrm_state_look_at(pol, x, fl, family,
                                           &best, &acquire_in_progress, &error);
        }
        if (best || acquire_in_progress)
                goto found;

        h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
        hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h_wildcard, bydst) {
                if (x->props.family == encap_family &&
                    x->props.reqid == tmpl->reqid &&
                    (mark & x->mark.m) == x->mark.v &&
                    x->if_id == if_id &&
                    !(x->props.flags & XFRM_STATE_WILDRECV) &&
                    xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
                    tmpl->mode == x->props.mode &&
                    tmpl->id.proto == x->id.proto &&
                    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
                        xfrm_state_look_at(pol, x, fl, family,
                                           &best, &acquire_in_progress, &error);
        }

found:
        x = best;
        if (!x && !error && !acquire_in_progress) {
                if (tmpl->id.spi &&
                    (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
                                              tmpl->id.proto, encap_family)) != NULL) {
                        to_put = x0;
                        error = -EEXIST;
                        goto out;
                }

                c.net = net;
                /* If the KMs have no listeners (yet...), avoid allocating an SA
                 * for each and every packet - garbage collection might not
                 * handle the flood.
                 */
                if (!km_is_alive(&c)) {
                        error = -ESRCH;
                        goto out;
                }

                x = xfrm_state_alloc(net);
                if (x == NULL) {
                        error = -ENOMEM;
                        goto out;
                }
                /* Initialize a temporary state matching only
                 * the current session. */
                xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
                memcpy(&x->mark, &pol->mark, sizeof(x->mark));
                x->if_id = if_id;

                error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
                if (error) {
                        x->km.state = XFRM_STATE_DEAD;
                        to_put = x;
                        x = NULL;
                        goto out;
                }

                if (km_query(x, tmpl, pol) == 0) {
                        spin_lock_bh(&net->xfrm.xfrm_state_lock);
                        x->km.state = XFRM_STATE_ACQ;
                        list_add(&x->km.all, &net->xfrm.state_all);
                        hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
                        h = xfrm_src_hash(net, daddr, saddr, encap_family);
                        hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
                        if (x->id.spi) {
                                h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
                                hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
                        }
                        x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
                        tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
                        net->xfrm.state_num++;
                        xfrm_hash_grow_check(net, x->bydst.next != NULL);
                        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
                } else {
                        x->km.state = XFRM_STATE_DEAD;
                        to_put = x;
                        x = NULL;
                        error = -ESRCH;
                }
        }
out:
        if (x) {
                if (!xfrm_state_hold_rcu(x)) {
                        *err = -EAGAIN;
                        x = NULL;
                }
        } else {
                *err = acquire_in_progress ? -EAGAIN : error;
        }
        rcu_read_unlock();
        if (to_put)
                xfrm_state_put(to_put);

        if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) {
                *err = -EAGAIN;
                if (x) {
                        xfrm_state_put(x);
                        x = NULL;
                }
        }

        return x;
}

struct xfrm_state *
xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
                    xfrm_address_t *daddr, xfrm_address_t *saddr,
                    unsigned short family, u8 mode, u8 proto, u32 reqid)
{
        unsigned int h;
        struct xfrm_state *rx = NULL, *x = NULL;

        spin_lock_bh(&net->xfrm.xfrm_state_lock);
        h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
        hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
                if (x->props.family == family &&
                    x->props.reqid == reqid &&
                    (mark & x->mark.m) == x->mark.v &&
                    x->if_id == if_id &&
                    !(x->props.flags & XFRM_STATE_WILDRECV) &&
                    xfrm_state_addr_check(x, daddr, saddr, family) &&
                    mode == x->props.mode &&
                    proto == x->id.proto &&
                    x->km.state == XFRM_STATE_VALID) {
                        rx = x;
                        break;
                }
        }

        if (rx)
                xfrm_state_hold(rx);
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);


        return rx;
}
EXPORT_SYMBOL(xfrm_stateonly_find);

struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
                                              unsigned short family)
{
        struct xfrm_state *x;
        struct xfrm_state_walk *w;

        spin_lock_bh(&net->xfrm.xfrm_state_lock);
        list_for_each_entry(w, &net->xfrm.state_all, all) {
                x = container_of(w, struct xfrm_state, km);
                if (x->props.family != family ||
                        x->id.spi != spi)
                        continue;

                xfrm_state_hold(x);
                spin_unlock_bh(&net->xfrm.xfrm_state_lock);
                return x;
        }
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
        return NULL;
}
EXPORT_SYMBOL(xfrm_state_lookup_byspi);

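/* Link a state into the per-netns list and the bydst/bysrc (and, when an
 * SPI is set, byspi) hashes, then start its lifetime and replay timers.
 * The caller holds net->xfrm.xfrm_state_lock.
 */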
static void __xfrm_state_insert(struct xfrm_state *x)
{
        struct net *net = xs_net(x);
        unsigned int h;

        list_add(&x->km.all, &net->xfrm.state_all);

        h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
                          x->props.reqid, x->props.family);
        hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);

        h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
        hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);

        if (x->id.spi) {
                h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
                                  x->props.family);

                hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
        }

        tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
        if (x->replay_maxage)
                mod_timer(&x->rtimer, jiffies + x->replay_maxage);

        net->xfrm.state_num++;

        xfrm_hash_grow_check(net, x->bydst.next != NULL);
}

/* net->xfrm.xfrm_state_lock is held */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
        struct net *net = xs_net(xnew);
        unsigned short family = xnew->props.family;
        u32 reqid = xnew->props.reqid;
        struct xfrm_state *x;
        unsigned int h;
        u32 mark = xnew->mark.v & xnew->mark.m;
        u32 if_id = xnew->if_id;

        h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
        hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
                if (x->props.family     == family &&
                    x->props.reqid      == reqid &&
                    x->if_id            == if_id &&
                    (mark & x->mark.m) == x->mark.v &&
                    xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
                    xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
                        x->genid++;
        }
}

void xfrm_state_insert(struct xfrm_state *x)
{
        struct net *net = xs_net(x);

        spin_lock_bh(&net->xfrm.xfrm_state_lock);
        __xfrm_state_bump_genids(x);
        __xfrm_state_insert(x);
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);

/* net->xfrm.xfrm_state_lock is held */
static struct xfrm_state *__find_acq_core(struct net *net,
                                          const struct xfrm_mark *m,
                                          unsigned short family, u8 mode,
                                          u32 reqid, u32 if_id, u8 proto,
                                          const xfrm_address_t *daddr,
                                          const xfrm_address_t *saddr,
                                          int create)
{
        unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
        struct xfrm_state *x;
        u32 mark = m->v & m->m;

        hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
                if (x->props.reqid  != reqid ||
                    x->props.mode   != mode ||
                    x->props.family != family ||
                    x->km.state     != XFRM_STATE_ACQ ||
                    x->id.spi       != 0 ||
                    x->id.proto     != proto ||
                    (mark & x->mark.m) != x->mark.v ||
                    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
                    !xfrm_addr_equal(&x->props.saddr, saddr, family))
                        continue;

                xfrm_state_hold(x);
                return x;
        }

        if (!create)
                return NULL;

        x = xfrm_state_alloc(net);
        if (likely(x)) {
                switch (family) {
                case AF_INET:
                        x->sel.daddr.a4 = daddr->a4;
                        x->sel.saddr.a4 = saddr->a4;
                        x->sel.prefixlen_d = 32;
                        x->sel.prefixlen_s = 32;
                        x->props.saddr.a4 = saddr->a4;
                        x->id.daddr.a4 = daddr->a4;
                        break;

                case AF_INET6:
                        x->sel.daddr.in6 = daddr->in6;
                        x->sel.saddr.in6 = saddr->in6;
                        x->sel.prefixlen_d = 128;
                        x->sel.prefixlen_s = 128;
                        x->props.saddr.in6 = saddr->in6;
                        x->id.daddr.in6 = daddr->in6;
                        break;
                }

                x->km.state = XFRM_STATE_ACQ;
                x->id.proto = proto;
                x->props.family = family;
                x->props.mode = mode;
                x->props.reqid = reqid;
                x->if_id = if_id;
                x->mark.v = m->v;
                x->mark.m = m->m;
                x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
                xfrm_state_hold(x);
                tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
                list_add(&x->km.all, &net->xfrm.state_all);
                hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
                h = xfrm_src_hash(net, daddr, saddr, family);
                hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);

                net->xfrm.state_num++;

                xfrm_hash_grow_check(net, x->bydst.next != NULL);
        }

        return x;
}

static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);

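/* Insert a fully configured state.  Fails with -EEXIST if an equivalent
 * state is already installed; otherwise any matching ACQ placeholder
 * (found by sequence number or via the acquire hash) is deleted once the
 * new state has been inserted.
 */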
int xfrm_state_add(struct xfrm_state *x)
{
        struct net *net = xs_net(x);
        struct xfrm_state *x1, *to_put;
        int family;
        int err;
        u32 mark = x->mark.v & x->mark.m;
        int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

        family = x->props.family;

        to_put = NULL;

        spin_lock_bh(&net->xfrm.xfrm_state_lock);

        x1 = __xfrm_state_locate(x, use_spi, family);
        if (x1) {
                to_put = x1;
                x1 = NULL;
                err = -EEXIST;
                goto out;
        }

        if (use_spi && x->km.seq) {
                x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq);
                if (x1 && ((x1->id.proto != x->id.proto) ||
                    !xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) {
                        to_put = x1;
                        x1 = NULL;
                }
        }

        if (use_spi && !x1)
                x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
                                     x->props.reqid, x->if_id, x->id.proto,
                                     &x->id.daddr, &x->props.saddr, 0);

        __xfrm_state_bump_genids(x);
        __xfrm_state_insert(x);
        err = 0;

out:
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);

        if (x1) {
                xfrm_state_delete(x1);
                xfrm_state_put(x1);
        }

        if (to_put)
                xfrm_state_put(to_put);

        return err;
}
EXPORT_SYMBOL(xfrm_state_add);

#ifdef CONFIG_XFRM_MIGRATE
static inline int clone_security(struct xfrm_state *x, struct xfrm_sec_ctx *security)
{
        struct xfrm_user_sec_ctx *uctx;
        int size = sizeof(*uctx) + security->ctx_len;
        int err;

        uctx = kmalloc(size, GFP_KERNEL);
        if (!uctx)
                return -ENOMEM;

        uctx->exttype = XFRMA_SEC_CTX;
        uctx->len = size;
        uctx->ctx_doi = security->ctx_doi;
        uctx->ctx_alg = security->ctx_alg;
        uctx->ctx_len = security->ctx_len;
        memcpy(uctx + 1, security->ctx_str, security->ctx_len);
        err = security_xfrm_state_alloc(x, uctx);
        kfree(uctx);
        if (err)
                return err;

        return 0;
}

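/* Deep-copy @orig for IPsec migration: identifiers, lifetimes,
 * algorithms, encapsulation (optionally replaced by @encap), security
 * context, replay state and marks are duplicated into a freshly
 * allocated state.  Returns NULL on any allocation or init failure.
 */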
static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
                                           struct xfrm_encap_tmpl *encap)
{
        struct net *net = xs_net(orig);
        struct xfrm_state *x = xfrm_state_alloc(net);
        if (!x)
                goto out;

        memcpy(&x->id, &orig->id, sizeof(x->id));
        memcpy(&x->sel, &orig->sel, sizeof(x->sel));
        memcpy(&x->lft, &orig->lft, sizeof(x->lft));
        x->props.mode = orig->props.mode;
        x->props.replay_window = orig->props.replay_window;
        x->props.reqid = orig->props.reqid;
        x->props.family = orig->props.family;
        x->props.saddr = orig->props.saddr;

        if (orig->aalg) {
                x->aalg = xfrm_algo_auth_clone(orig->aalg);
                if (!x->aalg)
                        goto error;
        }
        x->props.aalgo = orig->props.aalgo;

        if (orig->aead) {
                x->aead = xfrm_algo_aead_clone(orig->aead);
                x->geniv = orig->geniv;
                if (!x->aead)
                        goto error;
        }
        if (orig->ealg) {
                x->ealg = xfrm_algo_clone(orig->ealg);
                if (!x->ealg)
                        goto error;
        }
        x->props.ealgo = orig->props.ealgo;

        if (orig->calg) {
                x->calg = xfrm_algo_clone(orig->calg);
                if (!x->calg)
                        goto error;
        }
        x->props.calgo = orig->props.calgo;

        if (encap || orig->encap) {
                if (encap)
                        x->encap = kmemdup(encap, sizeof(*x->encap),
                                        GFP_KERNEL);
                else
                        x->encap = kmemdup(orig->encap, sizeof(*x->encap),
                                        GFP_KERNEL);

                if (!x->encap)
                        goto error;
        }

        if (orig->security)
                if (clone_security(x, orig->security))
                        goto error;

        if (orig->coaddr) {
                x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
                                    GFP_KERNEL);
                if (!x->coaddr)
                        goto error;
        }

        if (orig->replay_esn) {
                if (xfrm_replay_clone(x, orig))
                        goto error;
        }

        memcpy(&x->mark, &orig->mark, sizeof(x->mark));
        memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark));

        if (xfrm_init_state(x) < 0)
                goto error;

        x->props.flags = orig->props.flags;
        x->props.extra_flags = orig->props.extra_flags;

        x->if_id = orig->if_id;
        x->tfcpad = orig->tfcpad;
        x->replay_maxdiff = orig->replay_maxdiff;
        x->replay_maxage = orig->replay_maxage;
        memcpy(&x->curlft, &orig->curlft, sizeof(x->curlft));
        x->km.state = orig->km.state;
        x->km.seq = orig->km.seq;
        x->replay = orig->replay;
        x->preplay = orig->preplay;

        return x;

 error:
        xfrm_state_put(x);
out:
        return NULL;
}

struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net)
{
        unsigned int h;
        struct xfrm_state *x = NULL;

        spin_lock_bh(&net->xfrm.xfrm_state_lock);

        if (m->reqid) {
                h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
                                  m->reqid, m->old_family);
                hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
                        if (x->props.mode != m->mode ||
                            x->id.proto != m->proto)
                                continue;
                        if (m->reqid && x->props.reqid != m->reqid)
                                continue;
                        if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
                                             m->old_family) ||
                            !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
                                             m->old_family))
                                continue;
                        xfrm_state_hold(x);
                        break;
                }
        } else {
                h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
                                  m->old_family);
                hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
                        if (x->props.mode != m->mode ||
                            x->id.proto != m->proto)
                                continue;
                        if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
                                             m->old_family) ||
                            !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
                                             m->old_family))
                                continue;
                        xfrm_state_hold(x);
                        break;
                }
        }

        spin_unlock_bh(&net->xfrm.xfrm_state_lock);

        return x;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);

struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
                                      struct xfrm_migrate *m,
                                      struct xfrm_encap_tmpl *encap)
{
        struct xfrm_state *xc;

        xc = xfrm_state_clone(x, encap);
        if (!xc)
                return NULL;

        memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
        memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));

        /* add state */
        if (xfrm_addr_equal(&x->id.daddr, &m->new_daddr, m->new_family)) {
1531                 /* Care is needed when the destination address of the
1532                    state is to be updated, as it is part of the lookup triplet. */
1533                 xfrm_state_insert(xc);
1534         } else {
1535                 if (xfrm_state_add(xc) < 0)
1536                         goto error;
1537         }
1538
1539         return xc;
1540 error:
1541         xfrm_state_put(xc);
1542         return NULL;
1543 }
1544 EXPORT_SYMBOL(xfrm_state_migrate);
1545 #endif
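
/*
 * Editor's sketch, not part of the kernel source: one way a key manager
 * could drive an address migration with the two helpers above. It assumes
 * CONFIG_XFRM_MIGRATE; the wrapper name and error codes are invented for
 * illustration, only xfrm_migrate_state_find(), xfrm_state_migrate() and
 * xfrm_state_put() come from this file.
 */
static int __maybe_unused example_migrate_one(struct net *net,
					      struct xfrm_migrate *m,
					      struct xfrm_encap_tmpl *encap)
{
	struct xfrm_state *x, *xc;

	/* Find the SA keyed by the old addresses/proto/mode. */
	x = xfrm_migrate_state_find(m, net);
	if (!x)
		return -ENOENT;

	/* Clone it onto the new addresses and insert the clone. */
	xc = xfrm_state_migrate(x, m, encap);
	xfrm_state_put(x);

	return xc ? 0 : -ENOMEM;
}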
1546
1547 int xfrm_state_update(struct xfrm_state *x)
1548 {
1549         struct xfrm_state *x1, *to_put;
1550         int err;
1551         int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1552         struct net *net = xs_net(x);
1553
1554         to_put = NULL;
1555
1556         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1557         x1 = __xfrm_state_locate(x, use_spi, x->props.family);
1558
1559         err = -ESRCH;
1560         if (!x1)
1561                 goto out;
1562
1563         if (xfrm_state_kern(x1)) {
1564                 to_put = x1;
1565                 err = -EEXIST;
1566                 goto out;
1567         }
1568
1569         if (x1->km.state == XFRM_STATE_ACQ) {
1570                 __xfrm_state_insert(x);
1571                 x = NULL;
1572         }
1573         err = 0;
1574
1575 out:
1576         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1577
1578         if (to_put)
1579                 xfrm_state_put(to_put);
1580
1581         if (err)
1582                 return err;
1583
1584         if (!x) {
1585                 xfrm_state_delete(x1);
1586                 xfrm_state_put(x1);
1587                 return 0;
1588         }
1589
1590         err = -EINVAL;
1591         spin_lock_bh(&x1->lock);
1592         if (likely(x1->km.state == XFRM_STATE_VALID)) {
1593                 if (x->encap && x1->encap &&
1594                     x->encap->encap_type == x1->encap->encap_type)
1595                         memcpy(x1->encap, x->encap, sizeof(*x1->encap));
1596                 else if (x->encap || x1->encap)
1597                         goto fail;
1598
1599                 if (x->coaddr && x1->coaddr) {
1600                         memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
1601                 }
1602                 if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
1603                         memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
1604                 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
1605                 x1->km.dying = 0;
1606
1607                 tasklet_hrtimer_start(&x1->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
1608                 if (x1->curlft.use_time)
1609                         xfrm_state_check_expire(x1);
1610
1611                 if (x->props.smark.m || x->props.smark.v || x->if_id) {
1612                         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1613
1614                         if (x->props.smark.m || x->props.smark.v)
1615                                 x1->props.smark = x->props.smark;
1616
1617                         if (x->if_id)
1618                                 x1->if_id = x->if_id;
1619
1620                         __xfrm_state_bump_genids(x1);
1621                         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1622                 }
1623
1624                 err = 0;
1625                 x->km.state = XFRM_STATE_DEAD;
1626                 __xfrm_state_put(x);
1627         }
1628
1629 fail:
1630         spin_unlock_bh(&x1->lock);
1631
1632         xfrm_state_put(x1);
1633
1634         return err;
1635 }
1636 EXPORT_SYMBOL(xfrm_state_update);
1637
1638 int xfrm_state_check_expire(struct xfrm_state *x)
1639 {
1640         if (!x->curlft.use_time)
1641                 x->curlft.use_time = ktime_get_real_seconds();
1642
1643         if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1644             x->curlft.packets >= x->lft.hard_packet_limit) {
1645                 x->km.state = XFRM_STATE_EXPIRED;
1646                 tasklet_hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL);
1647                 return -EINVAL;
1648         }
1649
1650         if (!x->km.dying &&
1651             (x->curlft.bytes >= x->lft.soft_byte_limit ||
1652              x->curlft.packets >= x->lft.soft_packet_limit)) {
1653                 x->km.dying = 1;
1654                 km_state_expired(x, 0, 0);
1655         }
1656         return 0;
1657 }
1658 EXPORT_SYMBOL(xfrm_state_check_expire);
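
/*
 * Editor's illustration, not kernel code: xfrm_state_check_expire() above
 * compares the accumulated curlft counters with the soft and hard
 * byte/packet limits, raising a soft-expire event once and returning
 * -EINVAL once a hard limit is hit. A datapath caller typically bumps the
 * counters under x->lock and then asks whether the SA is still usable,
 * roughly as sketched below; the helper name is hypothetical.
 */
static int __maybe_unused example_account_and_check(struct xfrm_state *x,
						    unsigned int len)
{
	int err;

	spin_lock_bh(&x->lock);
	x->curlft.bytes += len;	/* payload just sent/received on this SA */
	x->curlft.packets++;
	err = xfrm_state_check_expire(x);
	spin_unlock_bh(&x->lock);

	return err;		/* 0: usable, -EINVAL: hard limit reached */
}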
1659
1660 struct xfrm_state *
1661 xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
1662                   u8 proto, unsigned short family)
1663 {
1664         struct xfrm_state *x;
1665
1666         rcu_read_lock();
1667         x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
1668         rcu_read_unlock();
1669         return x;
1670 }
1671 EXPORT_SYMBOL(xfrm_state_lookup);
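
/*
 * Editor's note, illustrative only: xfrm_state_lookup() returns a held
 * reference (or NULL), so every hit must be balanced by xfrm_state_put().
 * The protocol, family and mark value below are arbitrary example inputs.
 */
static void __maybe_unused example_lookup_by_spi(struct net *net,
						 const xfrm_address_t *daddr,
						 __be32 spi)
{
	struct xfrm_state *x;

	x = xfrm_state_lookup(net, 0, daddr, spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return;			/* no SA for this (daddr, spi, proto) */

	/* ... inspect x->props, x->lft, x->km.state, ... */

	xfrm_state_put(x);		/* drop the reference from the lookup */
}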
1672
1673 struct xfrm_state *
1674 xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1675                          const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1676                          u8 proto, unsigned short family)
1677 {
1678         struct xfrm_state *x;
1679
1680         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1681         x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
1682         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1683         return x;
1684 }
1685 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1686
1687 struct xfrm_state *
1688 xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
1689               u32 if_id, u8 proto, const xfrm_address_t *daddr,
1690               const xfrm_address_t *saddr, int create, unsigned short family)
1691 {
1692         struct xfrm_state *x;
1693
1694         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1695         x = __find_acq_core(net, mark, family, mode, reqid, if_id, proto, daddr, saddr, create);
1696         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1697
1698         return x;
1699 }
1700 EXPORT_SYMBOL(xfrm_find_acq);
1701
1702 #ifdef CONFIG_XFRM_SUB_POLICY
1703 int
1704 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1705                unsigned short family, struct net *net)
1706 {
1707         int i;
1708         int err = 0;
1709         struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1710         if (!afinfo)
1711                 return -EAFNOSUPPORT;
1712
1713         spin_lock_bh(&net->xfrm.xfrm_state_lock); /*FIXME*/
1714         if (afinfo->tmpl_sort)
1715                 err = afinfo->tmpl_sort(dst, src, n);
1716         else
1717                 for (i = 0; i < n; i++)
1718                         dst[i] = src[i];
1719         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1720         rcu_read_unlock();
1721         return err;
1722 }
1723 EXPORT_SYMBOL(xfrm_tmpl_sort);
1724
1725 int
1726 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1727                 unsigned short family)
1728 {
1729         int i;
1730         int err = 0;
1731         struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1732         struct net *net = xs_net(*src);
1733
1734         if (!afinfo)
1735                 return -EAFNOSUPPORT;
1736
1737         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1738         if (afinfo->state_sort)
1739                 err = afinfo->state_sort(dst, src, n);
1740         else
1741                 for (i = 0; i < n; i++)
1742                         dst[i] = src[i];
1743         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1744         rcu_read_unlock();
1745         return err;
1746 }
1747 EXPORT_SYMBOL(xfrm_state_sort);
1748 #endif
1749
1750 /* Silly enough, but I'm too lazy to build a resolution list. */
1751
1752 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1753 {
1754         int i;
1755
1756         for (i = 0; i <= net->xfrm.state_hmask; i++) {
1757                 struct xfrm_state *x;
1758
1759                 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
1760                         if (x->km.seq == seq &&
1761                             (mark & x->mark.m) == x->mark.v &&
1762                             x->km.state == XFRM_STATE_ACQ) {
1763                                 xfrm_state_hold(x);
1764                                 return x;
1765                         }
1766                 }
1767         }
1768         return NULL;
1769 }
1770
1771 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1772 {
1773         struct xfrm_state *x;
1774
1775         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1776         x = __xfrm_find_acq_byseq(net, mark, seq);
1777         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1778         return x;
1779 }
1780 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1781
1782 u32 xfrm_get_acqseq(void)
1783 {
1784         u32 res;
1785         static atomic_t acqseq;
1786
1787         do {
1788                 res = atomic_inc_return(&acqseq);
1789         } while (!res);
1790
1791         return res;
1792 }
1793 EXPORT_SYMBOL(xfrm_get_acqseq);
1794
1795 int verify_spi_info(u8 proto, u32 min, u32 max)
1796 {
1797         switch (proto) {
1798         case IPPROTO_AH:
1799         case IPPROTO_ESP:
1800                 break;
1801
1802         case IPPROTO_COMP:
1803                 /* IPCOMP SPI is 16 bits. */
1804                 if (max >= 0x10000)
1805                         return -EINVAL;
1806                 break;
1807
1808         default:
1809                 return -EINVAL;
1810         }
1811
1812         if (min > max)
1813                 return -EINVAL;
1814
1815         return 0;
1816 }
1817 EXPORT_SYMBOL(verify_spi_info);
1818
1819 int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1820 {
1821         struct net *net = xs_net(x);
1822         unsigned int h;
1823         struct xfrm_state *x0;
1824         int err = -ENOENT;
1825         __be32 minspi = htonl(low);
1826         __be32 maxspi = htonl(high);
1827         __be32 newspi = 0;
1828         u32 mark = x->mark.v & x->mark.m;
1829
1830         spin_lock_bh(&x->lock);
1831         if (x->km.state == XFRM_STATE_DEAD)
1832                 goto unlock;
1833
1834         err = 0;
1835         if (x->id.spi)
1836                 goto unlock;
1837
1838         err = -ENOENT;
1839
1840         if (minspi == maxspi) {
1841                 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
1842                 if (x0) {
1843                         xfrm_state_put(x0);
1844                         goto unlock;
1845                 }
1846                 newspi = minspi;
1847         } else {
1848                 u32 spi = 0;
1849                 for (h = 0; h < high-low+1; h++) {
1850                         spi = low + prandom_u32()%(high-low+1);
1851                         x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
1852                         if (x0 == NULL) {
1853                                 newspi = htonl(spi);
1854                                 break;
1855                         }
1856                         xfrm_state_put(x0);
1857                 }
1858         }
1859         if (newspi) {
1860                 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1861                 x->id.spi = newspi;
1862                 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
1863                 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
1864                 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1865
1866                 err = 0;
1867         }
1868
1869 unlock:
1870         spin_unlock_bh(&x->lock);
1871
1872         return err;
1873 }
1874 EXPORT_SYMBOL(xfrm_alloc_spi);
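
/*
 * Editor's sketch, not kernel code: how a configuration path might combine
 * verify_spi_info() and xfrm_alloc_spi(). The range is given in host byte
 * order; with low == high the exact SPI is requested, otherwise a free one
 * is picked at random from the range and the state is hashed by SPI. The
 * wrapper name is hypothetical.
 */
static int __maybe_unused example_pick_spi(struct xfrm_state *x,
					   u32 low, u32 high)
{
	int err;

	err = verify_spi_info(x->id.proto, low, high);
	if (err)
		return err;	/* unsupported proto or bad range */

	err = xfrm_alloc_spi(x, low, high);
	if (err)
		return err;	/* -ENOENT: range exhausted or state dead */

	return 0;		/* x->id.spi now holds the chosen SPI */
}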
1875
1876 static bool __xfrm_state_filter_match(struct xfrm_state *x,
1877                                       struct xfrm_address_filter *filter)
1878 {
1879         if (filter) {
1880                 if ((filter->family == AF_INET ||
1881                      filter->family == AF_INET6) &&
1882                     x->props.family != filter->family)
1883                         return false;
1884
1885                 return addr_match(&x->props.saddr, &filter->saddr,
1886                                   filter->splen) &&
1887                        addr_match(&x->id.daddr, &filter->daddr,
1888                                   filter->dplen);
1889         }
1890         return true;
1891 }
1892
1893 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1894                     int (*func)(struct xfrm_state *, int, void*),
1895                     void *data)
1896 {
1897         struct xfrm_state *state;
1898         struct xfrm_state_walk *x;
1899         int err = 0;
1900
1901         if (walk->seq != 0 && list_empty(&walk->all))
1902                 return 0;
1903
1904         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1905         if (list_empty(&walk->all))
1906                 x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
1907         else
1908                 x = list_first_entry(&walk->all, struct xfrm_state_walk, all);
1909         list_for_each_entry_from(x, &net->xfrm.state_all, all) {
1910                 if (x->state == XFRM_STATE_DEAD)
1911                         continue;
1912                 state = container_of(x, struct xfrm_state, km);
1913                 if (!xfrm_id_proto_match(state->id.proto, walk->proto))
1914                         continue;
1915                 if (!__xfrm_state_filter_match(state, walk->filter))
1916                         continue;
1917                 err = func(state, walk->seq, data);
1918                 if (err) {
1919                         list_move_tail(&walk->all, &x->all);
1920                         goto out;
1921                 }
1922                 walk->seq++;
1923         }
1924         if (walk->seq == 0) {
1925                 err = -ENOENT;
1926                 goto out;
1927         }
1928         list_del_init(&walk->all);
1929 out:
1930         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1931         return err;
1932 }
1933 EXPORT_SYMBOL(xfrm_state_walk);
1934
1935 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
1936                           struct xfrm_address_filter *filter)
1937 {
1938         INIT_LIST_HEAD(&walk->all);
1939         walk->proto = proto;
1940         walk->state = XFRM_STATE_DEAD;
1941         walk->seq = 0;
1942         walk->filter = filter;
1943 }
1944 EXPORT_SYMBOL(xfrm_state_walk_init);
1945
1946 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
1947 {
1948         kfree(walk->filter);
1949
1950         if (list_empty(&walk->all))
1951                 return;
1952
1953         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1954         list_del(&walk->all);
1955         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1956 }
1957 EXPORT_SYMBOL(xfrm_state_walk_done);
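
/*
 * Editor's illustration, not kernel code: the walk API above is used as
 * init -> walk -> done. The callback runs once per live state matching the
 * proto/address filter; a non-zero return stops the walk and is propagated.
 * The callback and the counting helper below are hypothetical.
 */
static int example_count_one(struct xfrm_state *x, int seq, void *ptr)
{
	(*(unsigned int *)ptr)++;	/* just count matching SAs */
	return 0;			/* non-zero would abort the walk */
}

static unsigned int __maybe_unused example_count_states(struct net *net)
{
	struct xfrm_state_walk walk;
	unsigned int nr = 0;

	xfrm_state_walk_init(&walk, IPSEC_PROTO_ANY, NULL);
	xfrm_state_walk(net, &walk, example_count_one, &nr);
	xfrm_state_walk_done(&walk, net);	/* frees the filter, if any */

	return nr;
}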
1958
1959 static void xfrm_replay_timer_handler(struct timer_list *t)
1960 {
1961         struct xfrm_state *x = from_timer(x, t, rtimer);
1962
1963         spin_lock(&x->lock);
1964
1965         if (x->km.state == XFRM_STATE_VALID) {
1966                 if (xfrm_aevent_is_on(xs_net(x)))
1967                         x->repl->notify(x, XFRM_REPLAY_TIMEOUT);
1968                 else
1969                         x->xflags |= XFRM_TIME_DEFER;
1970         }
1971
1972         spin_unlock(&x->lock);
1973 }
1974
1975 static LIST_HEAD(xfrm_km_list);
1976
1977 void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
1978 {
1979         struct xfrm_mgr *km;
1980
1981         rcu_read_lock();
1982         list_for_each_entry_rcu(km, &xfrm_km_list, list)
1983                 if (km->notify_policy)
1984                         km->notify_policy(xp, dir, c);
1985         rcu_read_unlock();
1986 }
1987
1988 void km_state_notify(struct xfrm_state *x, const struct km_event *c)
1989 {
1990         struct xfrm_mgr *km;
1991         rcu_read_lock();
1992         list_for_each_entry_rcu(km, &xfrm_km_list, list)
1993                 if (km->notify)
1994                         km->notify(x, c);
1995         rcu_read_unlock();
1996 }
1997
1998 EXPORT_SYMBOL(km_policy_notify);
1999 EXPORT_SYMBOL(km_state_notify);
2000
2001 void km_state_expired(struct xfrm_state *x, int hard, u32 portid)
2002 {
2003         struct km_event c;
2004
2005         c.data.hard = hard;
2006         c.portid = portid;
2007         c.event = XFRM_MSG_EXPIRE;
2008         km_state_notify(x, &c);
2009 }
2010
2011 EXPORT_SYMBOL(km_state_expired);
2012 /*
2013  * We send to all registered managers regardless of failure;
2014  * we are happy with one success.
2015  */
2016 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
2017 {
2018         int err = -EINVAL, acqret;
2019         struct xfrm_mgr *km;
2020
2021         rcu_read_lock();
2022         list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2023                 acqret = km->acquire(x, t, pol);
2024                 if (!acqret)
2025                         err = acqret;
2026         }
2027         rcu_read_unlock();
2028         return err;
2029 }
2030 EXPORT_SYMBOL(km_query);
2031
2032 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
2033 {
2034         int err = -EINVAL;
2035         struct xfrm_mgr *km;
2036
2037         rcu_read_lock();
2038         list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2039                 if (km->new_mapping)
2040                         err = km->new_mapping(x, ipaddr, sport);
2041                 if (!err)
2042                         break;
2043         }
2044         rcu_read_unlock();
2045         return err;
2046 }
2047 EXPORT_SYMBOL(km_new_mapping);
2048
2049 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
2050 {
2051         struct km_event c;
2052
2053         c.data.hard = hard;
2054         c.portid = portid;
2055         c.event = XFRM_MSG_POLEXPIRE;
2056         km_policy_notify(pol, dir, &c);
2057 }
2058 EXPORT_SYMBOL(km_policy_expired);
2059
2060 #ifdef CONFIG_XFRM_MIGRATE
2061 int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2062                const struct xfrm_migrate *m, int num_migrate,
2063                const struct xfrm_kmaddress *k,
2064                const struct xfrm_encap_tmpl *encap)
2065 {
2066         int err = -EINVAL;
2067         int ret;
2068         struct xfrm_mgr *km;
2069
2070         rcu_read_lock();
2071         list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2072                 if (km->migrate) {
2073                         ret = km->migrate(sel, dir, type, m, num_migrate, k,
2074                                           encap);
2075                         if (!ret)
2076                                 err = ret;
2077                 }
2078         }
2079         rcu_read_unlock();
2080         return err;
2081 }
2082 EXPORT_SYMBOL(km_migrate);
2083 #endif
2084
2085 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
2086 {
2087         int err = -EINVAL;
2088         int ret;
2089         struct xfrm_mgr *km;
2090
2091         rcu_read_lock();
2092         list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2093                 if (km->report) {
2094                         ret = km->report(net, proto, sel, addr);
2095                         if (!ret)
2096                                 err = ret;
2097                 }
2098         }
2099         rcu_read_unlock();
2100         return err;
2101 }
2102 EXPORT_SYMBOL(km_report);
2103
2104 bool km_is_alive(const struct km_event *c)
2105 {
2106         struct xfrm_mgr *km;
2107         bool is_alive = false;
2108
2109         rcu_read_lock();
2110         list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2111                 if (km->is_alive && km->is_alive(c)) {
2112                         is_alive = true;
2113                         break;
2114                 }
2115         }
2116         rcu_read_unlock();
2117
2118         return is_alive;
2119 }
2120 EXPORT_SYMBOL(km_is_alive);
2121
2122 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
2123 {
2124         int err;
2125         u8 *data;
2126         struct xfrm_mgr *km;
2127         struct xfrm_policy *pol = NULL;
2128
2129 #ifdef CONFIG_COMPAT
2130         if (in_compat_syscall())
2131                 return -EOPNOTSUPP;
2132 #endif
2133
2134         if (!optval && !optlen) {
2135                 xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
2136                 xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
2137                 __sk_dst_reset(sk);
2138                 return 0;
2139         }
2140
2141         if (optlen <= 0 || optlen > PAGE_SIZE)
2142                 return -EMSGSIZE;
2143
2144         data = memdup_user(optval, optlen);
2145         if (IS_ERR(data))
2146                 return PTR_ERR(data);
2147
2148         err = -EINVAL;
2149         rcu_read_lock();
2150         list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2151                 pol = km->compile_policy(sk, optname, data,
2152                                          optlen, &err);
2153                 if (err >= 0)
2154                         break;
2155         }
2156         rcu_read_unlock();
2157
2158         if (err >= 0) {
2159                 xfrm_sk_policy_insert(sk, err, pol);
2160                 xfrm_pol_put(pol);
2161                 __sk_dst_reset(sk);
2162                 err = 0;
2163         }
2164
2165         kfree(data);
2166         return err;
2167 }
2168 EXPORT_SYMBOL(xfrm_user_policy);
2169
2170 static DEFINE_SPINLOCK(xfrm_km_lock);
2171
2172 int xfrm_register_km(struct xfrm_mgr *km)
2173 {
2174         spin_lock_bh(&xfrm_km_lock);
2175         list_add_tail_rcu(&km->list, &xfrm_km_list);
2176         spin_unlock_bh(&xfrm_km_lock);
2177         return 0;
2178 }
2179 EXPORT_SYMBOL(xfrm_register_km);
2180
2181 int xfrm_unregister_km(struct xfrm_mgr *km)
2182 {
2183         spin_lock_bh(&xfrm_km_lock);
2184         list_del_rcu(&km->list);
2185         spin_unlock_bh(&xfrm_km_lock);
2186         synchronize_rcu();
2187         return 0;
2188 }
2189 EXPORT_SYMBOL(xfrm_unregister_km);
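
/*
 * Editor's sketch, not kernel code: a minimal key-manager registration.
 * The member names (.acquire, .compile_policy) are taken from the call
 * sites in this file; the callback signatures are inferred from those call
 * sites and should be checked against struct xfrm_mgr in <net/xfrm.h>.
 * km_query() invokes ->acquire() and xfrm_user_policy() invokes
 * ->compile_policy() without NULL checks, so a real manager must provide
 * both; the callbacks used elsewhere in this file are NULL-checked and may
 * be omitted.
 */
static int example_km_acquire(struct xfrm_state *x, struct xfrm_tmpl *t,
			      struct xfrm_policy *pol)
{
	/* A real key manager would start an SA negotiation here. */
	return -EOPNOTSUPP;
}

static struct xfrm_policy *example_km_compile_policy(struct sock *sk, int opt,
						     u8 *data, int len,
						     int *err)
{
	*err = -EOPNOTSUPP;	/* this example understands no policy format */
	return NULL;
}

static struct xfrm_mgr example_km = {
	.acquire	= example_km_acquire,
	.compile_policy	= example_km_compile_policy,
};

/*
 * Typically called from a module's init/exit paths:
 *	xfrm_register_km(&example_km);
 *	...
 *	xfrm_unregister_km(&example_km);
 */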
2190
2191 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
2192 {
2193         int err = 0;
2194
2195         if (WARN_ON(afinfo->family >= NPROTO))
2196                 return -EAFNOSUPPORT;
2197
2198         spin_lock_bh(&xfrm_state_afinfo_lock);
2199         if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
2200                 err = -EEXIST;
2201         else
2202                 rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
2203         spin_unlock_bh(&xfrm_state_afinfo_lock);
2204         return err;
2205 }
2206 EXPORT_SYMBOL(xfrm_state_register_afinfo);
2207
2208 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
2209 {
2210         int err = 0, family = afinfo->family;
2211
2212         if (WARN_ON(family >= NPROTO))
2213                 return -EAFNOSUPPORT;
2214
2215         spin_lock_bh(&xfrm_state_afinfo_lock);
2216         if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
2217                 if (rcu_access_pointer(xfrm_state_afinfo[family]) != afinfo)
2218                         err = -EINVAL;
2219                 else
2220                         RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL);
2221         }
2222         spin_unlock_bh(&xfrm_state_afinfo_lock);
2223         synchronize_rcu();
2224         return err;
2225 }
2226 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
2227
2228 struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family)
2229 {
2230         if (unlikely(family >= NPROTO))
2231                 return NULL;
2232
2233         return rcu_dereference(xfrm_state_afinfo[family]);
2234 }
2235
2236 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
2237 {
2238         struct xfrm_state_afinfo *afinfo;
2239         if (unlikely(family >= NPROTO))
2240                 return NULL;
2241         rcu_read_lock();
2242         afinfo = rcu_dereference(xfrm_state_afinfo[family]);
2243         if (unlikely(!afinfo))
2244                 rcu_read_unlock();
2245         return afinfo;
2246 }
2247
2248 void xfrm_flush_gc(void)
2249 {
2250         flush_work(&xfrm_state_gc_work);
2251 }
2252 EXPORT_SYMBOL(xfrm_flush_gc);
2253
2254 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
2255 void xfrm_state_delete_tunnel(struct xfrm_state *x)
2256 {
2257         if (x->tunnel) {
2258                 struct xfrm_state *t = x->tunnel;
2259
2260                 if (atomic_read(&t->tunnel_users) == 2)
2261                         xfrm_state_delete(t);
2262                 atomic_dec(&t->tunnel_users);
2263                 xfrm_state_put_sync(t);
2264                 x->tunnel = NULL;
2265         }
2266 }
2267 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
2268
2269 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
2270 {
2271         const struct xfrm_type *type = READ_ONCE(x->type);
2272
2273         if (x->km.state == XFRM_STATE_VALID &&
2274             type && type->get_mtu)
2275                 return type->get_mtu(x, mtu);
2276
2277         return mtu - x->props.header_len;
2278 }
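
/*
 * Editor's note, illustrative only: xfrm_state_mtu() above reports how much
 * payload still fits once this SA's overhead is accounted for, either by
 * asking the transform type (e.g. ESP rounds down to the cipher block size)
 * or by subtracting the precomputed header length. The link MTU below is an
 * arbitrary example value and the helper is hypothetical.
 */
static void __maybe_unused example_report_mtu(struct xfrm_state *x)
{
	int link_mtu = 1500;				/* e.g. Ethernet */
	int payload_mtu = xfrm_state_mtu(x, link_mtu);

	pr_debug("xfrm: SA spi 0x%x payload mtu %d\n",
		 ntohl(x->id.spi), payload_mtu);
}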
2279
2280 int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
2281 {
2282         struct xfrm_state_afinfo *afinfo;
2283         struct xfrm_mode *inner_mode;
2284         int family = x->props.family;
2285         int err;
2286
2287         err = -EAFNOSUPPORT;
2288         afinfo = xfrm_state_get_afinfo(family);
2289         if (!afinfo)
2290                 goto error;
2291
2292         err = 0;
2293         if (afinfo->init_flags)
2294                 err = afinfo->init_flags(x);
2295
2296         rcu_read_unlock();
2297
2298         if (err)
2299                 goto error;
2300
2301         err = -EPROTONOSUPPORT;
2302
2303         if (x->sel.family != AF_UNSPEC) {
2304                 inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
2305                 if (inner_mode == NULL)
2306                         goto error;
2307
2308                 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
2309                     family != x->sel.family) {
2310                         xfrm_put_mode(inner_mode);
2311                         goto error;
2312                 }
2313
2314                 x->inner_mode = inner_mode;
2315         } else {
2316                 struct xfrm_mode *inner_mode_iaf;
2317                 int iafamily = AF_INET;
2318
2319                 inner_mode = xfrm_get_mode(x->props.mode, x->props.family);
2320                 if (inner_mode == NULL)
2321                         goto error;
2322
2323                 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
2324                         xfrm_put_mode(inner_mode);
2325                         goto error;
2326                 }
2327                 x->inner_mode = inner_mode;
2328
2329                 if (x->props.family == AF_INET)
2330                         iafamily = AF_INET6;
2331
2332                 inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily);
2333                 if (inner_mode_iaf) {
2334                         if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)
2335                                 x->inner_mode_iaf = inner_mode_iaf;
2336                         else
2337                                 xfrm_put_mode(inner_mode_iaf);
2338                 }
2339         }
2340
2341         x->type = xfrm_get_type(x->id.proto, family);
2342         if (x->type == NULL)
2343                 goto error;
2344
2345         x->type_offload = xfrm_get_type_offload(x->id.proto, family, offload);
2346
2347         err = x->type->init_state(x);
2348         if (err)
2349                 goto error;
2350
2351         x->outer_mode = xfrm_get_mode(x->props.mode, family);
2352         if (x->outer_mode == NULL) {
2353                 err = -EPROTONOSUPPORT;
2354                 goto error;
2355         }
2356
2357         if (init_replay) {
2358                 err = xfrm_init_replay(x);
2359                 if (err)
2360                         goto error;
2361         }
2362
2363 error:
2364         return err;
2365 }
2366
2367 EXPORT_SYMBOL(__xfrm_init_state);
2368
2369 int xfrm_init_state(struct xfrm_state *x)
2370 {
2371         int err;
2372
2373         err = __xfrm_init_state(x, true, false);
2374         if (!err)
2375                 x->km.state = XFRM_STATE_VALID;
2376
2377         return err;
2378 }
2379
2380 EXPORT_SYMBOL(xfrm_init_state);
2381
2382 int __net_init xfrm_state_init(struct net *net)
2383 {
2384         unsigned int sz;
2385
2386         if (net_eq(net, &init_net))
2387                 xfrm_state_cache = KMEM_CACHE(xfrm_state,
2388                                               SLAB_HWCACHE_ALIGN | SLAB_PANIC);
2389
2390         INIT_LIST_HEAD(&net->xfrm.state_all);
2391
2392         sz = sizeof(struct hlist_head) * 8;
2393
2394         net->xfrm.state_bydst = xfrm_hash_alloc(sz);
2395         if (!net->xfrm.state_bydst)
2396                 goto out_bydst;
2397         net->xfrm.state_bysrc = xfrm_hash_alloc(sz);
2398         if (!net->xfrm.state_bysrc)
2399                 goto out_bysrc;
2400         net->xfrm.state_byspi = xfrm_hash_alloc(sz);
2401         if (!net->xfrm.state_byspi)
2402                 goto out_byspi;
2403         net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
2404
2405         net->xfrm.state_num = 0;
2406         INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
2407         spin_lock_init(&net->xfrm.xfrm_state_lock);
2408         seqcount_init(&net->xfrm.xfrm_state_hash_generation);
2409         return 0;
2410
2411 out_byspi:
2412         xfrm_hash_free(net->xfrm.state_bysrc, sz);
2413 out_bysrc:
2414         xfrm_hash_free(net->xfrm.state_bydst, sz);
2415 out_bydst:
2416         return -ENOMEM;
2417 }
2418
2419 void xfrm_state_fini(struct net *net)
2420 {
2421         unsigned int sz;
2422
2423         flush_work(&net->xfrm.state_hash_work);
2424         flush_work(&xfrm_state_gc_work);
2425         xfrm_state_flush(net, 0, false, true);
2426
2427         WARN_ON(!list_empty(&net->xfrm.state_all));
2428
2429         sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
2430         WARN_ON(!hlist_empty(net->xfrm.state_byspi));
2431         xfrm_hash_free(net->xfrm.state_byspi, sz);
2432         WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
2433         xfrm_hash_free(net->xfrm.state_bysrc, sz);
2434         WARN_ON(!hlist_empty(net->xfrm.state_bydst));
2435         xfrm_hash_free(net->xfrm.state_bydst, sz);
2436 }
2437
2438 #ifdef CONFIG_AUDITSYSCALL
2439 static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
2440                                      struct audit_buffer *audit_buf)
2441 {
2442         struct xfrm_sec_ctx *ctx = x->security;
2443         u32 spi = ntohl(x->id.spi);
2444
2445         if (ctx)
2446                 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2447                                  ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2448
2449         switch (x->props.family) {
2450         case AF_INET:
2451                 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2452                                  &x->props.saddr.a4, &x->id.daddr.a4);
2453                 break;
2454         case AF_INET6:
2455                 audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
2456                                  x->props.saddr.a6, x->id.daddr.a6);
2457                 break;
2458         }
2459
2460         audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2461 }
2462
2463 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
2464                                       struct audit_buffer *audit_buf)
2465 {
2466         const struct iphdr *iph4;
2467         const struct ipv6hdr *iph6;
2468
2469         switch (family) {
2470         case AF_INET:
2471                 iph4 = ip_hdr(skb);
2472                 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2473                                  &iph4->saddr, &iph4->daddr);
2474                 break;
2475         case AF_INET6:
2476                 iph6 = ipv6_hdr(skb);
2477                 audit_log_format(audit_buf,
2478                                  " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
2479                                  &iph6->saddr, &iph6->daddr,
2480                                  iph6->flow_lbl[0] & 0x0f,
2481                                  iph6->flow_lbl[1],
2482                                  iph6->flow_lbl[2]);
2483                 break;
2484         }
2485 }
2486
2487 void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid)
2488 {
2489         struct audit_buffer *audit_buf;
2490
2491         audit_buf = xfrm_audit_start("SAD-add");
2492         if (audit_buf == NULL)
2493                 return;
2494         xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2495         xfrm_audit_helper_sainfo(x, audit_buf);
2496         audit_log_format(audit_buf, " res=%u", result);
2497         audit_log_end(audit_buf);
2498 }
2499 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2500
2501 void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid)
2502 {
2503         struct audit_buffer *audit_buf;
2504
2505         audit_buf = xfrm_audit_start("SAD-delete");
2506         if (audit_buf == NULL)
2507                 return;
2508         xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2509         xfrm_audit_helper_sainfo(x, audit_buf);
2510         audit_log_format(audit_buf, " res=%u", result);
2511         audit_log_end(audit_buf);
2512 }
2513 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2514
2515 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2516                                       struct sk_buff *skb)
2517 {
2518         struct audit_buffer *audit_buf;
2519         u32 spi;
2520
2521         audit_buf = xfrm_audit_start("SA-replay-overflow");
2522         if (audit_buf == NULL)
2523                 return;
2524         xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2525         /* don't record the sequence number because it's inherent in this kind
2526          * of audit message */
2527         spi = ntohl(x->id.spi);
2528         audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2529         audit_log_end(audit_buf);
2530 }
2531 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
2532
2533 void xfrm_audit_state_replay(struct xfrm_state *x,
2534                              struct sk_buff *skb, __be32 net_seq)
2535 {
2536         struct audit_buffer *audit_buf;
2537         u32 spi;
2538
2539         audit_buf = xfrm_audit_start("SA-replayed-pkt");
2540         if (audit_buf == NULL)
2541                 return;
2542         xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2543         spi = ntohl(x->id.spi);
2544         audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2545                          spi, spi, ntohl(net_seq));
2546         audit_log_end(audit_buf);
2547 }
2548 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay);
2549
2550 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2551 {
2552         struct audit_buffer *audit_buf;
2553
2554         audit_buf = xfrm_audit_start("SA-notfound");
2555         if (audit_buf == NULL)
2556                 return;
2557         xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2558         audit_log_end(audit_buf);
2559 }
2560 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
2561
2562 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2563                                __be32 net_spi, __be32 net_seq)
2564 {
2565         struct audit_buffer *audit_buf;
2566         u32 spi;
2567
2568         audit_buf = xfrm_audit_start("SA-notfound");
2569         if (audit_buf == NULL)
2570                 return;
2571         xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2572         spi = ntohl(net_spi);
2573         audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2574                          spi, spi, ntohl(net_seq));
2575         audit_log_end(audit_buf);
2576 }
2577 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
2578
2579 void xfrm_audit_state_icvfail(struct xfrm_state *x,
2580                               struct sk_buff *skb, u8 proto)
2581 {
2582         struct audit_buffer *audit_buf;
2583         __be32 net_spi;
2584         __be32 net_seq;
2585
2586         audit_buf = xfrm_audit_start("SA-icv-failure");
2587         if (audit_buf == NULL)
2588                 return;
2589         xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2590         if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2591                 u32 spi = ntohl(net_spi);
2592                 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2593                                  spi, spi, ntohl(net_seq));
2594         }
2595         audit_log_end(audit_buf);
2596 }
2597 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2598 #endif /* CONFIG_AUDITSYSCALL */