6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <linux/uaccess.h>
24 #include <linux/ktime.h>
25 #include <linux/slab.h>
26 #include <linux/interrupt.h>
27 #include <linux/kernel.h>
29 #include "xfrm_hash.h"
31 #define xfrm_state_deref_prot(table, net) \
32 rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
34 static void xfrm_state_gc_task(struct work_struct *work);
36 /* Each xfrm_state may be linked to two tables:
38 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
39 2. Hash table by (daddr,family,reqid) to find what SAs exist for given
40       destination/tunnel endpoint. (output) */
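/* Illustrative sketch (not part of the original file): an inbound SA is
 * resolved through the by-SPI table, e.g.
 *
 *	struct xfrm_state *x;
 *
 *	x = xfrm_state_lookup(net, mark, &daddr, spi,
 *			      IPPROTO_ESP, AF_INET);
 *	if (x)
 *		xfrm_state_put(x);
 *
 * while output-side template resolution walks the (daddr, family, reqid)
 * table via xfrm_state_find(). IPPROTO_ESP/AF_INET above are arbitrary
 * example values.
 */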
43 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
44 static __read_mostly seqcount_t xfrm_state_hash_generation = SEQCNT_ZERO(xfrm_state_hash_generation);
45 static struct kmem_cache *xfrm_state_cache __ro_after_init;
47 static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
48 static HLIST_HEAD(xfrm_state_gc_list);
50 static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
52 return refcount_inc_not_zero(&x->refcnt);
55 static inline unsigned int xfrm_dst_hash(struct net *net,
56 const xfrm_address_t *daddr,
57 const xfrm_address_t *saddr,
59 unsigned short family)
61 return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
64 static inline unsigned int xfrm_src_hash(struct net *net,
65 const xfrm_address_t *daddr,
66 const xfrm_address_t *saddr,
67 unsigned short family)
69 return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
72 static inline unsigned int
73 xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
74 __be32 spi, u8 proto, unsigned short family)
76 return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
79 static void xfrm_hash_transfer(struct hlist_head *list,
80 struct hlist_head *ndsttable,
81 struct hlist_head *nsrctable,
82 struct hlist_head *nspitable,
83 unsigned int nhashmask)
85 struct hlist_node *tmp;
88 hlist_for_each_entry_safe(x, tmp, list, bydst) {
91 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
92 x->props.reqid, x->props.family,
94 hlist_add_head_rcu(&x->bydst, ndsttable + h);
96 h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
99 hlist_add_head_rcu(&x->bysrc, nsrctable + h);
102 h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
103 x->id.proto, x->props.family,
105 hlist_add_head_rcu(&x->byspi, nspitable + h);
110 static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
112 return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
115 static void xfrm_hash_resize(struct work_struct *work)
117 struct net *net = container_of(work, struct net, xfrm.state_hash_work);
118 struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
119 unsigned long nsize, osize;
120 unsigned int nhashmask, ohashmask;
123 nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
124 ndst = xfrm_hash_alloc(nsize);
127 nsrc = xfrm_hash_alloc(nsize);
129 xfrm_hash_free(ndst, nsize);
132 nspi = xfrm_hash_alloc(nsize);
134 xfrm_hash_free(ndst, nsize);
135 xfrm_hash_free(nsrc, nsize);
139 spin_lock_bh(&net->xfrm.xfrm_state_lock);
140 write_seqcount_begin(&xfrm_state_hash_generation);
142 nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
143 odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
144 for (i = net->xfrm.state_hmask; i >= 0; i--)
145 xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nhashmask);
147 osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
148 ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
149 ohashmask = net->xfrm.state_hmask;
151 rcu_assign_pointer(net->xfrm.state_bydst, ndst);
152 rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
153 rcu_assign_pointer(net->xfrm.state_byspi, nspi);
154 net->xfrm.state_hmask = nhashmask;
156 write_seqcount_end(&xfrm_state_hash_generation);
157 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
159 osize = (ohashmask + 1) * sizeof(struct hlist_head);
163 xfrm_hash_free(odst, osize);
164 xfrm_hash_free(osrc, osize);
165 xfrm_hash_free(ospi, osize);
168 static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
169 static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];
171 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
173 int __xfrm_state_delete(struct xfrm_state *x);
175 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
176 bool km_is_alive(const struct km_event *c);
177 void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
179 static DEFINE_SPINLOCK(xfrm_type_lock);
180 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
182 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
183 const struct xfrm_type **typemap;
186 if (unlikely(afinfo == NULL))
187 return -EAFNOSUPPORT;
188 typemap = afinfo->type_map;
189 spin_lock_bh(&xfrm_type_lock);
191 if (likely(typemap[type->proto] == NULL))
192 typemap[type->proto] = type;
195 spin_unlock_bh(&xfrm_type_lock);
199 EXPORT_SYMBOL(xfrm_register_type);
201 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
203 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
204 const struct xfrm_type **typemap;
207 if (unlikely(afinfo == NULL))
208 return -EAFNOSUPPORT;
209 typemap = afinfo->type_map;
210 spin_lock_bh(&xfrm_type_lock);
212 if (unlikely(typemap[type->proto] != type))
215 typemap[type->proto] = NULL;
216 spin_unlock_bh(&xfrm_type_lock);
220 EXPORT_SYMBOL(xfrm_unregister_type);
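/* Sketch of how a protocol module plugs into the type map; the example_*
 * callbacks are hypothetical placeholders, the field names follow the
 * in-tree ESP/AH implementations:
 *
 *	static const struct xfrm_type example_type = {
 *		.description	= "EXAMPLE",
 *		.owner		= THIS_MODULE,
 *		.proto		= IPPROTO_ESP,
 *		.init_state	= example_init_state,
 *		.destructor	= example_destroy,
 *		.input		= example_input,
 *		.output		= example_output,
 *	};
 *
 *	err = xfrm_register_type(&example_type, AF_INET);
 *	...
 *	xfrm_unregister_type(&example_type, AF_INET);
 */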
222 static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
224 struct xfrm_state_afinfo *afinfo;
225 const struct xfrm_type **typemap;
226 const struct xfrm_type *type;
227 int modload_attempted = 0;
230 afinfo = xfrm_state_get_afinfo(family);
231 if (unlikely(afinfo == NULL))
233 typemap = afinfo->type_map;
235 type = READ_ONCE(typemap[proto]);
236 if (unlikely(type && !try_module_get(type->owner)))
241 if (!type && !modload_attempted) {
242 request_module("xfrm-type-%d-%d", family, proto);
243 modload_attempted = 1;
250 static void xfrm_put_type(const struct xfrm_type *type)
252 module_put(type->owner);
255 static DEFINE_SPINLOCK(xfrm_type_offload_lock);
256 int xfrm_register_type_offload(const struct xfrm_type_offload *type,
257 unsigned short family)
259 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
260 const struct xfrm_type_offload **typemap;
263 if (unlikely(afinfo == NULL))
264 return -EAFNOSUPPORT;
265 typemap = afinfo->type_offload_map;
266 spin_lock_bh(&xfrm_type_offload_lock);
268 if (likely(typemap[type->proto] == NULL))
269 typemap[type->proto] = type;
272 spin_unlock_bh(&xfrm_type_offload_lock);
276 EXPORT_SYMBOL(xfrm_register_type_offload);
278 int xfrm_unregister_type_offload(const struct xfrm_type_offload *type,
279 unsigned short family)
281 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
282 const struct xfrm_type_offload **typemap;
285 if (unlikely(afinfo == NULL))
286 return -EAFNOSUPPORT;
287 typemap = afinfo->type_offload_map;
288 spin_lock_bh(&xfrm_type_offload_lock);
290 if (unlikely(typemap[type->proto] != type))
293 typemap[type->proto] = NULL;
294 spin_unlock_bh(&xfrm_type_offload_lock);
298 EXPORT_SYMBOL(xfrm_unregister_type_offload);
300 static const struct xfrm_type_offload *
301 xfrm_get_type_offload(u8 proto, unsigned short family, bool try_load)
303 struct xfrm_state_afinfo *afinfo;
304 const struct xfrm_type_offload **typemap;
305 const struct xfrm_type_offload *type;
308 afinfo = xfrm_state_get_afinfo(family);
309 if (unlikely(afinfo == NULL))
311 typemap = afinfo->type_offload_map;
313 type = typemap[proto];
314 if ((type && !try_module_get(type->owner)))
319 if (!type && try_load) {
320 request_module("xfrm-offload-%d-%d", family, proto);
328 static void xfrm_put_type_offload(const struct xfrm_type_offload *type)
330 module_put(type->owner);
333 static DEFINE_SPINLOCK(xfrm_mode_lock);
334 int xfrm_register_mode(struct xfrm_mode *mode, int family)
336 struct xfrm_state_afinfo *afinfo;
337 struct xfrm_mode **modemap;
340 if (unlikely(mode->encap >= XFRM_MODE_MAX))
343 afinfo = xfrm_state_get_afinfo(family);
344 if (unlikely(afinfo == NULL))
345 return -EAFNOSUPPORT;
348 modemap = afinfo->mode_map;
349 spin_lock_bh(&xfrm_mode_lock);
350 if (modemap[mode->encap])
354 if (!try_module_get(afinfo->owner))
357 mode->afinfo = afinfo;
358 modemap[mode->encap] = mode;
362 spin_unlock_bh(&xfrm_mode_lock);
366 EXPORT_SYMBOL(xfrm_register_mode);
368 int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
370 struct xfrm_state_afinfo *afinfo;
371 struct xfrm_mode **modemap;
374 if (unlikely(mode->encap >= XFRM_MODE_MAX))
377 afinfo = xfrm_state_get_afinfo(family);
378 if (unlikely(afinfo == NULL))
379 return -EAFNOSUPPORT;
382 modemap = afinfo->mode_map;
383 spin_lock_bh(&xfrm_mode_lock);
384 if (likely(modemap[mode->encap] == mode)) {
385 modemap[mode->encap] = NULL;
386 module_put(mode->afinfo->owner);
390 spin_unlock_bh(&xfrm_mode_lock);
394 EXPORT_SYMBOL(xfrm_unregister_mode);
396 static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
398 struct xfrm_state_afinfo *afinfo;
399 struct xfrm_mode *mode;
400 int modload_attempted = 0;
402 if (unlikely(encap >= XFRM_MODE_MAX))
406 afinfo = xfrm_state_get_afinfo(family);
407 if (unlikely(afinfo == NULL))
410 mode = READ_ONCE(afinfo->mode_map[encap]);
411 if (unlikely(mode && !try_module_get(mode->owner)))
415 if (!mode && !modload_attempted) {
416 request_module("xfrm-mode-%d-%d", family, encap);
417 modload_attempted = 1;
424 static void xfrm_put_mode(struct xfrm_mode *mode)
426 module_put(mode->owner);
429 void xfrm_state_free(struct xfrm_state *x)
431 kmem_cache_free(xfrm_state_cache, x);
433 EXPORT_SYMBOL(xfrm_state_free);
435 static void ___xfrm_state_destroy(struct xfrm_state *x)
437 tasklet_hrtimer_cancel(&x->mtimer);
438 del_timer_sync(&x->rtimer);
445 kfree(x->replay_esn);
446 kfree(x->preplay_esn);
448 xfrm_put_mode(x->inner_mode);
449 if (x->inner_mode_iaf)
450 xfrm_put_mode(x->inner_mode_iaf);
452 xfrm_put_mode(x->outer_mode);
454 xfrm_put_type_offload(x->type_offload);
456 x->type->destructor(x);
457 xfrm_put_type(x->type);
459 xfrm_dev_state_free(x);
460 security_xfrm_state_free(x);
464 static void xfrm_state_gc_task(struct work_struct *work)
466 struct xfrm_state *x;
467 struct hlist_node *tmp;
468 struct hlist_head gc_list;
470 spin_lock_bh(&xfrm_state_gc_lock);
471 hlist_move_list(&xfrm_state_gc_list, &gc_list);
472 spin_unlock_bh(&xfrm_state_gc_lock);
476 hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
477 ___xfrm_state_destroy(x);
480 static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
482 struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer);
483 struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer);
484 time64_t now = ktime_get_real_seconds();
485 time64_t next = TIME64_MAX;
490 if (x->km.state == XFRM_STATE_DEAD)
492 if (x->km.state == XFRM_STATE_EXPIRED)
494 if (x->lft.hard_add_expires_seconds) {
495 long tmo = x->lft.hard_add_expires_seconds +
496 x->curlft.add_time - now;
498 if (x->xflags & XFRM_SOFT_EXPIRE) {
499 /* enter hard expire without soft expire first?!
500 * setting a new date could trigger this.
501 * workaround: fix x->curlft.add_time as done below. */
503 x->curlft.add_time = now - x->saved_tmo - 1;
504 tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
511 if (x->lft.hard_use_expires_seconds) {
512 long tmo = x->lft.hard_use_expires_seconds +
513 (x->curlft.use_time ? : now) - now;
521 if (x->lft.soft_add_expires_seconds) {
522 long tmo = x->lft.soft_add_expires_seconds +
523 x->curlft.add_time - now;
526 x->xflags &= ~XFRM_SOFT_EXPIRE;
527 } else if (tmo < next) {
529 x->xflags |= XFRM_SOFT_EXPIRE;
533 if (x->lft.soft_use_expires_seconds) {
534 long tmo = x->lft.soft_use_expires_seconds +
535 (x->curlft.use_time ? : now) - now;
544 km_state_expired(x, 0, 0);
546 if (next != TIME64_MAX) {
547 tasklet_hrtimer_start(&x->mtimer, ktime_set(next, 0), HRTIMER_MODE_REL);
553 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0)
554 x->km.state = XFRM_STATE_EXPIRED;
556 err = __xfrm_state_delete(x);
558 km_state_expired(x, 1, 0);
560 xfrm_audit_state_delete(x, err ? 0 : 1, true);
563 spin_unlock(&x->lock);
564 return HRTIMER_NORESTART;
567 static void xfrm_replay_timer_handler(struct timer_list *t);
569 struct xfrm_state *xfrm_state_alloc(struct net *net)
571 struct xfrm_state *x;
573 x = kmem_cache_alloc(xfrm_state_cache, GFP_ATOMIC | __GFP_ZERO);
576 write_pnet(&x->xs_net, net);
577 refcount_set(&x->refcnt, 1);
578 atomic_set(&x->tunnel_users, 0);
579 INIT_LIST_HEAD(&x->km.all);
580 INIT_HLIST_NODE(&x->bydst);
581 INIT_HLIST_NODE(&x->bysrc);
582 INIT_HLIST_NODE(&x->byspi);
583 tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler,
584 CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
585 timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
586 x->curlft.add_time = ktime_get_real_seconds();
587 x->lft.soft_byte_limit = XFRM_INF;
588 x->lft.soft_packet_limit = XFRM_INF;
589 x->lft.hard_byte_limit = XFRM_INF;
590 x->lft.hard_packet_limit = XFRM_INF;
591 x->replay_maxage = 0;
592 x->replay_maxdiff = 0;
593 x->inner_mode = NULL;
594 x->inner_mode_iaf = NULL;
595 spin_lock_init(&x->lock);
599 EXPORT_SYMBOL(xfrm_state_alloc);
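/* Typical caller pattern (sketch, modelled on the netlink front end): the
 * reference returned by xfrm_state_alloc() is donated to the SAD by a
 * successful xfrm_state_add() and dropped again by __xfrm_state_delete().
 *
 *	x = xfrm_state_alloc(net);
 *	if (!x)
 *		return -ENOMEM;
 *	... fill in x->id, x->props, algorithms ...
 *	err = xfrm_state_add(x);
 *	if (err) {
 *		x->km.state = XFRM_STATE_DEAD;
 *		__xfrm_state_put(x);
 *	}
 */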
601 void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
603 WARN_ON(x->km.state != XFRM_STATE_DEAD);
607 ___xfrm_state_destroy(x);
609 spin_lock_bh(&xfrm_state_gc_lock);
610 hlist_add_head(&x->gclist, &xfrm_state_gc_list);
611 spin_unlock_bh(&xfrm_state_gc_lock);
612 schedule_work(&xfrm_state_gc_work);
615 EXPORT_SYMBOL(__xfrm_state_destroy);
617 int __xfrm_state_delete(struct xfrm_state *x)
619 struct net *net = xs_net(x);
622 if (x->km.state != XFRM_STATE_DEAD) {
623 x->km.state = XFRM_STATE_DEAD;
624 spin_lock(&net->xfrm.xfrm_state_lock);
625 list_del(&x->km.all);
626 hlist_del_rcu(&x->bydst);
627 hlist_del_rcu(&x->bysrc);
629 hlist_del_rcu(&x->byspi);
630 net->xfrm.state_num--;
631 spin_unlock(&net->xfrm.xfrm_state_lock);
633 xfrm_dev_state_delete(x);
635 /* All xfrm_state objects are created by xfrm_state_alloc.
636 * The xfrm_state_alloc call gives a reference, and that
637 * is what we are dropping here.
645 EXPORT_SYMBOL(__xfrm_state_delete);
647 int xfrm_state_delete(struct xfrm_state *x)
651 spin_lock_bh(&x->lock);
652 err = __xfrm_state_delete(x);
653 spin_unlock_bh(&x->lock);
657 EXPORT_SYMBOL(xfrm_state_delete);
659 #ifdef CONFIG_SECURITY_NETWORK_XFRM
661 xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
665 for (i = 0; i <= net->xfrm.state_hmask; i++) {
666 struct xfrm_state *x;
668 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
669 if (xfrm_id_proto_match(x->id.proto, proto) &&
670 (err = security_xfrm_state_delete(x)) != 0) {
671 xfrm_audit_state_delete(x, 0, task_valid);
681 xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
685 for (i = 0; i <= net->xfrm.state_hmask; i++) {
686 struct xfrm_state *x;
687 struct xfrm_state_offload *xso;
689 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
692 if (xso->dev == dev &&
693 (err = security_xfrm_state_delete(x)) != 0) {
694 xfrm_audit_state_delete(x, 0, task_valid);
704 xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
710 xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
716 int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
718 int i, err = 0, cnt = 0;
720 spin_lock_bh(&net->xfrm.xfrm_state_lock);
721 err = xfrm_state_flush_secctx_check(net, proto, task_valid);
726 for (i = 0; i <= net->xfrm.state_hmask; i++) {
727 struct xfrm_state *x;
729 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
730 if (!xfrm_state_kern(x) &&
731 xfrm_id_proto_match(x->id.proto, proto)) {
733 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
735 err = xfrm_state_delete(x);
736 xfrm_audit_state_delete(x, err ? 0 : 1,
739 xfrm_state_put_sync(x);
745 spin_lock_bh(&net->xfrm.xfrm_state_lock);
751 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
757 EXPORT_SYMBOL(xfrm_state_flush);
759 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid)
761 int i, err = 0, cnt = 0;
763 spin_lock_bh(&net->xfrm.xfrm_state_lock);
764 err = xfrm_dev_state_flush_secctx_check(net, dev, task_valid);
769 for (i = 0; i <= net->xfrm.state_hmask; i++) {
770 struct xfrm_state *x;
771 struct xfrm_state_offload *xso;
773 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
776 if (!xfrm_state_kern(x) && xso->dev == dev) {
778 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
780 err = xfrm_state_delete(x);
781 xfrm_audit_state_delete(x, err ? 0 : 1,
787 spin_lock_bh(&net->xfrm.xfrm_state_lock);
796 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
799 EXPORT_SYMBOL(xfrm_dev_state_flush);
801 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
803 spin_lock_bh(&net->xfrm.xfrm_state_lock);
804 si->sadcnt = net->xfrm.state_num;
805 si->sadhcnt = net->xfrm.state_hmask + 1;
806 si->sadhmcnt = xfrm_state_hashmax;
807 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
809 EXPORT_SYMBOL(xfrm_sad_getinfo);
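/* Example consumer (sketch): snapshot the SAD counters for reporting.
 *
 *	struct xfrmk_sadinfo si;
 *
 *	xfrm_sad_getinfo(net, &si);
 *	pr_info("SAD: %u entries, %u hash buckets (max %u)\n",
 *		si.sadcnt, si.sadhcnt, si.sadhmcnt);
 */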
812 xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
813 const struct xfrm_tmpl *tmpl,
814 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
815 unsigned short family)
817 struct xfrm_state_afinfo *afinfo = xfrm_state_afinfo_get_rcu(family);
822 afinfo->init_tempsel(&x->sel, fl);
824 if (family != tmpl->encap_family) {
825 afinfo = xfrm_state_afinfo_get_rcu(tmpl->encap_family);
829 afinfo->init_temprop(x, tmpl, daddr, saddr);
832 static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
833 const xfrm_address_t *daddr,
834 __be32 spi, u8 proto,
835 unsigned short family)
837 unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
838 struct xfrm_state *x;
840 hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
841 if (x->props.family != family ||
843 x->id.proto != proto ||
844 !xfrm_addr_equal(&x->id.daddr, daddr, family))
847 if ((mark & x->mark.m) != x->mark.v)
849 if (!xfrm_state_hold_rcu(x))
857 static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
858 const xfrm_address_t *daddr,
859 const xfrm_address_t *saddr,
860 u8 proto, unsigned short family)
862 unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
863 struct xfrm_state *x;
865 hlist_for_each_entry_rcu(x, net->xfrm.state_bysrc + h, bysrc) {
866 if (x->props.family != family ||
867 x->id.proto != proto ||
868 !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
869 !xfrm_addr_equal(&x->props.saddr, saddr, family))
872 if ((mark & x->mark.m) != x->mark.v)
874 if (!xfrm_state_hold_rcu(x))
882 static inline struct xfrm_state *
883 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
885 struct net *net = xs_net(x);
886 u32 mark = x->mark.v & x->mark.m;
889 return __xfrm_state_lookup(net, mark, &x->id.daddr,
890 x->id.spi, x->id.proto, family);
892 return __xfrm_state_lookup_byaddr(net, mark,
895 x->id.proto, family);
898 static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
900 if (have_hash_collision &&
901 (net->xfrm.state_hmask + 1) < xfrm_state_hashmax &&
902 net->xfrm.state_num > net->xfrm.state_hmask)
903 schedule_work(&net->xfrm.state_hash_work);
906 static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
907 const struct flowi *fl, unsigned short family,
908 struct xfrm_state **best, int *acq_in_progress,
912 * 1. There is a valid state with matching selector. Done.
913 * 2. Valid state with inappropriate selector. Skip.
915 * Entering area of "sysdeps".
917 * 3. If the state is not valid, its selector is temporary and matches
918 *    only the session which triggered the previous resolution. The key
919 *    manager will do something to install a state with the proper selector. */
922 if (x->km.state == XFRM_STATE_VALID) {
923 if ((x->sel.family &&
924 !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
925 !security_xfrm_state_pol_flow_match(x, pol, fl))
929 (*best)->km.dying > x->km.dying ||
930 ((*best)->km.dying == x->km.dying &&
931 (*best)->curlft.add_time < x->curlft.add_time))
933 } else if (x->km.state == XFRM_STATE_ACQ) {
934 *acq_in_progress = 1;
935 } else if (x->km.state == XFRM_STATE_ERROR ||
936 x->km.state == XFRM_STATE_EXPIRED) {
937 if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
938 security_xfrm_state_pol_flow_match(x, pol, fl))
944 xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
945 const struct flowi *fl, struct xfrm_tmpl *tmpl,
946 struct xfrm_policy *pol, int *err,
947 unsigned short family, u32 if_id)
949 static xfrm_address_t saddr_wildcard = { };
950 struct net *net = xp_net(pol);
951 unsigned int h, h_wildcard;
952 struct xfrm_state *x, *x0, *to_put;
953 int acquire_in_progress = 0;
955 struct xfrm_state *best = NULL;
956 u32 mark = pol->mark.v & pol->mark.m;
957 unsigned short encap_family = tmpl->encap_family;
958 unsigned int sequence;
963 sequence = read_seqcount_begin(&xfrm_state_hash_generation);
966 h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
967 hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) {
968 if (x->props.family == encap_family &&
969 x->props.reqid == tmpl->reqid &&
970 (mark & x->mark.m) == x->mark.v &&
972 !(x->props.flags & XFRM_STATE_WILDRECV) &&
973 xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
974 tmpl->mode == x->props.mode &&
975 tmpl->id.proto == x->id.proto &&
976 (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
977 xfrm_state_look_at(pol, x, fl, encap_family,
978 &best, &acquire_in_progress, &error);
980 if (best || acquire_in_progress)
983 h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
984 hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h_wildcard, bydst) {
985 if (x->props.family == encap_family &&
986 x->props.reqid == tmpl->reqid &&
987 (mark & x->mark.m) == x->mark.v &&
989 !(x->props.flags & XFRM_STATE_WILDRECV) &&
990 xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
991 tmpl->mode == x->props.mode &&
992 tmpl->id.proto == x->id.proto &&
993 (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
994 xfrm_state_look_at(pol, x, fl, encap_family,
995 &best, &acquire_in_progress, &error);
1000 if (!x && !error && !acquire_in_progress) {
1002 (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
1003 tmpl->id.proto, encap_family)) != NULL) {
1010 /* If the KMs have no listeners (yet...), avoid allocating an SA
1011 * for each and every packet - garbage collection might not
1014 if (!km_is_alive(&c)) {
1019 x = xfrm_state_alloc(net);
1024 /* Initialize a temporary state matching only
1025 * the current session. */
1026 xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
1027 memcpy(&x->mark, &pol->mark, sizeof(x->mark));
1030 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
1032 x->km.state = XFRM_STATE_DEAD;
1038 if (km_query(x, tmpl, pol) == 0) {
1039 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1040 x->km.state = XFRM_STATE_ACQ;
1041 list_add(&x->km.all, &net->xfrm.state_all);
1042 hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
1043 h = xfrm_src_hash(net, daddr, saddr, encap_family);
1044 hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
1046 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
1047 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
1049 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1050 tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
1051 net->xfrm.state_num++;
1052 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1053 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1055 x->km.state = XFRM_STATE_DEAD;
1063 if (!xfrm_state_hold_rcu(x)) {
1068 *err = acquire_in_progress ? -EAGAIN : error;
1072 xfrm_state_put(to_put);
1074 if (read_seqcount_retry(&xfrm_state_hash_generation, sequence)) {
1086 xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
1087 xfrm_address_t *daddr, xfrm_address_t *saddr,
1088 unsigned short family, u8 mode, u8 proto, u32 reqid)
1091 struct xfrm_state *rx = NULL, *x = NULL;
1093 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1094 h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1095 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1096 if (x->props.family == family &&
1097 x->props.reqid == reqid &&
1098 (mark & x->mark.m) == x->mark.v &&
1099 x->if_id == if_id &&
1100 !(x->props.flags & XFRM_STATE_WILDRECV) &&
1101 xfrm_state_addr_check(x, daddr, saddr, family) &&
1102 mode == x->props.mode &&
1103 proto == x->id.proto &&
1104 x->km.state == XFRM_STATE_VALID) {
1111 xfrm_state_hold(rx);
1112 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1117 EXPORT_SYMBOL(xfrm_stateonly_find);
1119 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1120 unsigned short family)
1122 struct xfrm_state *x;
1123 struct xfrm_state_walk *w;
1125 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1126 list_for_each_entry(w, &net->xfrm.state_all, all) {
1127 x = container_of(w, struct xfrm_state, km);
1128 if (x->props.family != family ||
1133 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1136 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1139 EXPORT_SYMBOL(xfrm_state_lookup_byspi);
1141 static void __xfrm_state_insert(struct xfrm_state *x)
1143 struct net *net = xs_net(x);
1146 list_add(&x->km.all, &net->xfrm.state_all);
1148 h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
1149 x->props.reqid, x->props.family);
1150 hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
1152 h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
1153 hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
1156 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
1159 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
1162 tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
1163 if (x->replay_maxage)
1164 mod_timer(&x->rtimer, jiffies + x->replay_maxage);
1166 net->xfrm.state_num++;
1168 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1171 /* net->xfrm.xfrm_state_lock is held */
1172 static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
1174 struct net *net = xs_net(xnew);
1175 unsigned short family = xnew->props.family;
1176 u32 reqid = xnew->props.reqid;
1177 struct xfrm_state *x;
1179 u32 mark = xnew->mark.v & xnew->mark.m;
1180 u32 if_id = xnew->if_id;
1182 h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
1183 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1184 if (x->props.family == family &&
1185 x->props.reqid == reqid &&
1186 x->if_id == if_id &&
1187 (mark & x->mark.m) == x->mark.v &&
1188 xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
1189 xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
1194 void xfrm_state_insert(struct xfrm_state *x)
1196 struct net *net = xs_net(x);
1198 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1199 __xfrm_state_bump_genids(x);
1200 __xfrm_state_insert(x);
1201 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1203 EXPORT_SYMBOL(xfrm_state_insert);
1205 /* net->xfrm.xfrm_state_lock is held */
1206 static struct xfrm_state *__find_acq_core(struct net *net,
1207 const struct xfrm_mark *m,
1208 unsigned short family, u8 mode,
1209 u32 reqid, u32 if_id, u8 proto,
1210 const xfrm_address_t *daddr,
1211 const xfrm_address_t *saddr,
1214 unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1215 struct xfrm_state *x;
1216 u32 mark = m->v & m->m;
1218 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1219 if (x->props.reqid != reqid ||
1220 x->props.mode != mode ||
1221 x->props.family != family ||
1222 x->km.state != XFRM_STATE_ACQ ||
1224 x->id.proto != proto ||
1225 (mark & x->mark.m) != x->mark.v ||
1226 !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
1227 !xfrm_addr_equal(&x->props.saddr, saddr, family))
1237 x = xfrm_state_alloc(net);
1241 x->sel.daddr.a4 = daddr->a4;
1242 x->sel.saddr.a4 = saddr->a4;
1243 x->sel.prefixlen_d = 32;
1244 x->sel.prefixlen_s = 32;
1245 x->props.saddr.a4 = saddr->a4;
1246 x->id.daddr.a4 = daddr->a4;
1250 x->sel.daddr.in6 = daddr->in6;
1251 x->sel.saddr.in6 = saddr->in6;
1252 x->sel.prefixlen_d = 128;
1253 x->sel.prefixlen_s = 128;
1254 x->props.saddr.in6 = saddr->in6;
1255 x->id.daddr.in6 = daddr->in6;
1259 x->km.state = XFRM_STATE_ACQ;
1260 x->id.proto = proto;
1261 x->props.family = family;
1262 x->props.mode = mode;
1263 x->props.reqid = reqid;
1267 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1269 tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
1270 list_add(&x->km.all, &net->xfrm.state_all);
1271 hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
1272 h = xfrm_src_hash(net, daddr, saddr, family);
1273 hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
1275 net->xfrm.state_num++;
1277 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1283 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
1285 int xfrm_state_add(struct xfrm_state *x)
1287 struct net *net = xs_net(x);
1288 struct xfrm_state *x1, *to_put;
1291 u32 mark = x->mark.v & x->mark.m;
1292 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1294 family = x->props.family;
1298 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1300 x1 = __xfrm_state_locate(x, use_spi, family);
1308 if (use_spi && x->km.seq) {
1309 x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq);
1310 if (x1 && ((x1->id.proto != x->id.proto) ||
1311 !xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) {
1318 x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
1319 x->props.reqid, x->if_id, x->id.proto,
1320 &x->id.daddr, &x->props.saddr, 0);
1322 __xfrm_state_bump_genids(x);
1323 __xfrm_state_insert(x);
1327 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1330 xfrm_state_delete(x1);
1335 xfrm_state_put(to_put);
1339 EXPORT_SYMBOL(xfrm_state_add);
1341 #ifdef CONFIG_XFRM_MIGRATE
1342 static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
1343 struct xfrm_encap_tmpl *encap)
1345 struct net *net = xs_net(orig);
1346 struct xfrm_state *x = xfrm_state_alloc(net);
1350 memcpy(&x->id, &orig->id, sizeof(x->id));
1351 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1352 memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1353 x->props.mode = orig->props.mode;
1354 x->props.replay_window = orig->props.replay_window;
1355 x->props.reqid = orig->props.reqid;
1356 x->props.family = orig->props.family;
1357 x->props.saddr = orig->props.saddr;
1360 x->aalg = xfrm_algo_auth_clone(orig->aalg);
1364 x->props.aalgo = orig->props.aalgo;
1367 x->aead = xfrm_algo_aead_clone(orig->aead);
1368 x->geniv = orig->geniv;
1373 x->ealg = xfrm_algo_clone(orig->ealg);
1377 x->props.ealgo = orig->props.ealgo;
1380 x->calg = xfrm_algo_clone(orig->calg);
1384 x->props.calgo = orig->props.calgo;
1386 if (encap || orig->encap) {
1388 x->encap = kmemdup(encap, sizeof(*x->encap),
1391 x->encap = kmemdup(orig->encap, sizeof(*x->encap),
1399 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
1405 if (orig->replay_esn) {
1406 if (xfrm_replay_clone(x, orig))
1410 memcpy(&x->mark, &orig->mark, sizeof(x->mark));
1412 if (xfrm_init_state(x) < 0)
1415 x->props.flags = orig->props.flags;
1416 x->props.extra_flags = orig->props.extra_flags;
1418 x->if_id = orig->if_id;
1419 x->tfcpad = orig->tfcpad;
1420 x->replay_maxdiff = orig->replay_maxdiff;
1421 x->replay_maxage = orig->replay_maxage;
1422 x->curlft.add_time = orig->curlft.add_time;
1423 x->km.state = orig->km.state;
1424 x->km.seq = orig->km.seq;
1425 x->replay = orig->replay;
1426 x->preplay = orig->preplay;
1436 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net)
1439 struct xfrm_state *x = NULL;
1441 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1444 h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
1445 m->reqid, m->old_family);
1446 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1447 if (x->props.mode != m->mode ||
1448 x->id.proto != m->proto)
1450 if (m->reqid && x->props.reqid != m->reqid)
1452 if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
1454 !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
1461 h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
1463 hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
1464 if (x->props.mode != m->mode ||
1465 x->id.proto != m->proto)
1467 if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
1469 !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
1477 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1481 EXPORT_SYMBOL(xfrm_migrate_state_find);
1483 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
1484 struct xfrm_migrate *m,
1485 struct xfrm_encap_tmpl *encap)
1487 struct xfrm_state *xc;
1489 xc = xfrm_state_clone(x, encap);
1493 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1494 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1497 if (xfrm_addr_equal(&x->id.daddr, &m->new_daddr, m->new_family)) {
1498 /* care is needed when the destination address of the
1499 state is updated, as it is part of the lookup triplet */
1500 xfrm_state_insert(xc);
1502 if (xfrm_state_add(xc) < 0)
1511 EXPORT_SYMBOL(xfrm_state_migrate);
1514 int xfrm_state_update(struct xfrm_state *x)
1516 struct xfrm_state *x1, *to_put;
1518 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1519 struct net *net = xs_net(x);
1523 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1524 x1 = __xfrm_state_locate(x, use_spi, x->props.family);
1530 if (xfrm_state_kern(x1)) {
1536 if (x1->km.state == XFRM_STATE_ACQ) {
1537 __xfrm_state_insert(x);
1543 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1546 xfrm_state_put(to_put);
1552 xfrm_state_delete(x1);
1558 spin_lock_bh(&x1->lock);
1559 if (likely(x1->km.state == XFRM_STATE_VALID)) {
1560 if (x->encap && x1->encap &&
1561 x->encap->encap_type == x1->encap->encap_type)
1562 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
1563 else if (x->encap || x1->encap)
1566 if (x->coaddr && x1->coaddr) {
1567 memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
1569 if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
1570 memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
1571 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
1574 tasklet_hrtimer_start(&x1->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
1575 if (x1->curlft.use_time)
1576 xfrm_state_check_expire(x1);
1578 if (x->props.smark.m || x->props.smark.v || x->if_id) {
1579 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1581 if (x->props.smark.m || x->props.smark.v)
1582 x1->props.smark = x->props.smark;
1585 x1->if_id = x->if_id;
1587 __xfrm_state_bump_genids(x1);
1588 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1592 x->km.state = XFRM_STATE_DEAD;
1593 __xfrm_state_put(x);
1597 spin_unlock_bh(&x1->lock);
1603 EXPORT_SYMBOL(xfrm_state_update);
1605 int xfrm_state_check_expire(struct xfrm_state *x)
1607 if (!x->curlft.use_time)
1608 x->curlft.use_time = ktime_get_real_seconds();
1610 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1611 x->curlft.packets >= x->lft.hard_packet_limit) {
1612 x->km.state = XFRM_STATE_EXPIRED;
1613 tasklet_hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL);
1618 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1619 x->curlft.packets >= x->lft.soft_packet_limit)) {
1621 km_state_expired(x, 0, 0);
1625 EXPORT_SYMBOL(xfrm_state_check_expire);
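/* Sketch of how the datapath uses this, roughly following the output path:
 * check the lifetime under x->lock, then account the packet to the SA.
 *
 *	spin_lock_bh(&x->lock);
 *	err = xfrm_state_check_expire(x);
 *	if (err)
 *		... drop, the state hit a hard limit and is now EXPIRED ...
 *	x->curlft.bytes += skb->len;
 *	x->curlft.packets++;
 *	spin_unlock_bh(&x->lock);
 */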
1628 xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
1629 u8 proto, unsigned short family)
1631 struct xfrm_state *x;
1634 x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
1638 EXPORT_SYMBOL(xfrm_state_lookup);
1641 xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1642 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1643 u8 proto, unsigned short family)
1645 struct xfrm_state *x;
1647 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1648 x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
1649 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1652 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1655 xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
1656 u32 if_id, u8 proto, const xfrm_address_t *daddr,
1657 const xfrm_address_t *saddr, int create, unsigned short family)
1659 struct xfrm_state *x;
1661 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1662 x = __find_acq_core(net, mark, family, mode, reqid, if_id, proto, daddr, saddr, create);
1663 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1667 EXPORT_SYMBOL(xfrm_find_acq);
1669 #ifdef CONFIG_XFRM_SUB_POLICY
1671 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1672 unsigned short family, struct net *net)
1676 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1678 return -EAFNOSUPPORT;
1680 spin_lock_bh(&net->xfrm.xfrm_state_lock); /*FIXME*/
1681 if (afinfo->tmpl_sort)
1682 err = afinfo->tmpl_sort(dst, src, n);
1684 for (i = 0; i < n; i++)
1686 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1690 EXPORT_SYMBOL(xfrm_tmpl_sort);
1693 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1694 unsigned short family)
1698 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1699 struct net *net = xs_net(*src);
1702 return -EAFNOSUPPORT;
1704 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1705 if (afinfo->state_sort)
1706 err = afinfo->state_sort(dst, src, n);
1708 for (i = 0; i < n; i++)
1710 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1714 EXPORT_SYMBOL(xfrm_state_sort);
1717 /* Silly enough, but I'm too lazy to build a resolution list */
1719 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1723 for (i = 0; i <= net->xfrm.state_hmask; i++) {
1724 struct xfrm_state *x;
1726 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
1727 if (x->km.seq == seq &&
1728 (mark & x->mark.m) == x->mark.v &&
1729 x->km.state == XFRM_STATE_ACQ) {
1738 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1740 struct xfrm_state *x;
1742 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1743 x = __xfrm_find_acq_byseq(net, mark, seq);
1744 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1747 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1749 u32 xfrm_get_acqseq(void)
1752 static atomic_t acqseq;
1755 res = atomic_inc_return(&acqseq);
1760 EXPORT_SYMBOL(xfrm_get_acqseq);
1762 int verify_spi_info(u8 proto, u32 min, u32 max)
1770 /* IPCOMP SPI is 16 bits. */
1784 EXPORT_SYMBOL(verify_spi_info);
1786 int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1788 struct net *net = xs_net(x);
1790 struct xfrm_state *x0;
1792 __be32 minspi = htonl(low);
1793 __be32 maxspi = htonl(high);
1794 u32 mark = x->mark.v & x->mark.m;
1796 spin_lock_bh(&x->lock);
1797 if (x->km.state == XFRM_STATE_DEAD)
1806 if (minspi == maxspi) {
1807 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
1815 for (h = 0; h < high - low + 1; h++) {
1816 spi = low + prandom_u32() % (high - low + 1);
1817 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
1819 x->id.spi = htonl(spi);
1826 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1827 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
1828 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
1829 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1835 spin_unlock_bh(&x->lock);
1839 EXPORT_SYMBOL(xfrm_alloc_spi);
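/* SPI allocation as driven by userspace requests (sketch): validate the
 * requested range first, then let the kernel pick an unused SPI and hash
 * the state into the by-SPI table.
 *
 *	err = verify_spi_info(x->id.proto, low, high);
 *	if (err)
 *		return err;
 *	err = xfrm_alloc_spi(x, low, high);
 *	if (!err)
 *		... x->id.spi is now set and x is linked into state_byspi ...
 */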
1841 static bool __xfrm_state_filter_match(struct xfrm_state *x,
1842 struct xfrm_address_filter *filter)
1845 if ((filter->family == AF_INET ||
1846 filter->family == AF_INET6) &&
1847 x->props.family != filter->family)
1850 return addr_match(&x->props.saddr, &filter->saddr,
1852 addr_match(&x->id.daddr, &filter->daddr,
1858 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1859 int (*func)(struct xfrm_state *, int, void*),
1862 struct xfrm_state *state;
1863 struct xfrm_state_walk *x;
1866 if (walk->seq != 0 && list_empty(&walk->all))
1869 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1870 if (list_empty(&walk->all))
1871 x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
1873 x = list_first_entry(&walk->all, struct xfrm_state_walk, all);
1874 list_for_each_entry_from(x, &net->xfrm.state_all, all) {
1875 if (x->state == XFRM_STATE_DEAD)
1877 state = container_of(x, struct xfrm_state, km);
1878 if (!xfrm_id_proto_match(state->id.proto, walk->proto))
1880 if (!__xfrm_state_filter_match(state, walk->filter))
1882 err = func(state, walk->seq, data);
1884 list_move_tail(&walk->all, &x->all);
1889 if (walk->seq == 0) {
1893 list_del_init(&walk->all);
1895 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1898 EXPORT_SYMBOL(xfrm_state_walk);
1900 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
1901 struct xfrm_address_filter *filter)
1903 INIT_LIST_HEAD(&walk->all);
1904 walk->proto = proto;
1905 walk->state = XFRM_STATE_DEAD;
1907 walk->filter = filter;
1909 EXPORT_SYMBOL(xfrm_state_walk_init);
1911 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
1913 kfree(walk->filter);
1915 if (list_empty(&walk->all))
1918 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1919 list_del(&walk->all);
1920 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1922 EXPORT_SYMBOL(xfrm_state_walk_done);
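/* Dumping the SAD with the walker API (sketch; dump_one is a hypothetical
 * callback matching the func prototype of xfrm_state_walk() above):
 *
 *	static int dump_one(struct xfrm_state *x, int seq, void *data)
 *	{
 *		... emit one SA ...
 *		return 0;
 *	}
 *
 *	struct xfrm_state_walk walk;
 *
 *	xfrm_state_walk_init(&walk, IPSEC_PROTO_ANY, NULL);
 *	err = xfrm_state_walk(net, &walk, dump_one, data);
 *	xfrm_state_walk_done(&walk, net);
 */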
1924 static void xfrm_replay_timer_handler(struct timer_list *t)
1926 struct xfrm_state *x = from_timer(x, t, rtimer);
1928 spin_lock(&x->lock);
1930 if (x->km.state == XFRM_STATE_VALID) {
1931 if (xfrm_aevent_is_on(xs_net(x)))
1932 x->repl->notify(x, XFRM_REPLAY_TIMEOUT);
1934 x->xflags |= XFRM_TIME_DEFER;
1937 spin_unlock(&x->lock);
1940 static LIST_HEAD(xfrm_km_list);
1942 void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
1944 struct xfrm_mgr *km;
1947 list_for_each_entry_rcu(km, &xfrm_km_list, list)
1948 if (km->notify_policy)
1949 km->notify_policy(xp, dir, c);
1953 void km_state_notify(struct xfrm_state *x, const struct km_event *c)
1955 struct xfrm_mgr *km;
1957 list_for_each_entry_rcu(km, &xfrm_km_list, list)
1963 EXPORT_SYMBOL(km_policy_notify);
1964 EXPORT_SYMBOL(km_state_notify);
1966 void km_state_expired(struct xfrm_state *x, int hard, u32 portid)
1972 c.event = XFRM_MSG_EXPIRE;
1973 km_state_notify(x, &c);
1976 EXPORT_SYMBOL(km_state_expired);
1978 * We send to all registered managers regardless of failure;
1979 * we are happy with one success.
1981 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1983 int err = -EINVAL, acqret;
1984 struct xfrm_mgr *km;
1987 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
1988 acqret = km->acquire(x, t, pol);
1995 EXPORT_SYMBOL(km_query);
1997 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
2000 struct xfrm_mgr *km;
2003 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2004 if (km->new_mapping)
2005 err = km->new_mapping(x, ipaddr, sport);
2012 EXPORT_SYMBOL(km_new_mapping);
2014 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
2020 c.event = XFRM_MSG_POLEXPIRE;
2021 km_policy_notify(pol, dir, &c);
2023 EXPORT_SYMBOL(km_policy_expired);
2025 #ifdef CONFIG_XFRM_MIGRATE
2026 int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2027 const struct xfrm_migrate *m, int num_migrate,
2028 const struct xfrm_kmaddress *k,
2029 const struct xfrm_encap_tmpl *encap)
2033 struct xfrm_mgr *km;
2036 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2038 ret = km->migrate(sel, dir, type, m, num_migrate, k,
2047 EXPORT_SYMBOL(km_migrate);
2050 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
2054 struct xfrm_mgr *km;
2057 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2059 ret = km->report(net, proto, sel, addr);
2067 EXPORT_SYMBOL(km_report);
2069 bool km_is_alive(const struct km_event *c)
2071 struct xfrm_mgr *km;
2072 bool is_alive = false;
2075 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2076 if (km->is_alive && km->is_alive(c)) {
2085 EXPORT_SYMBOL(km_is_alive);
2087 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
2091 struct xfrm_mgr *km;
2092 struct xfrm_policy *pol = NULL;
2094 if (in_compat_syscall())
2097 if (!optval && !optlen) {
2098 xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
2099 xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
2104 if (optlen <= 0 || optlen > PAGE_SIZE)
2107 data = memdup_user(optval, optlen);
2109 return PTR_ERR(data);
2113 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2114 pol = km->compile_policy(sk, optname, data,
2122 xfrm_sk_policy_insert(sk, err, pol);
2131 EXPORT_SYMBOL(xfrm_user_policy);
2133 static DEFINE_SPINLOCK(xfrm_km_lock);
2135 int xfrm_register_km(struct xfrm_mgr *km)
2137 spin_lock_bh(&xfrm_km_lock);
2138 list_add_tail_rcu(&km->list, &xfrm_km_list);
2139 spin_unlock_bh(&xfrm_km_lock);
2142 EXPORT_SYMBOL(xfrm_register_km);
2144 int xfrm_unregister_km(struct xfrm_mgr *km)
2146 spin_lock_bh(&xfrm_km_lock);
2147 list_del_rcu(&km->list);
2148 spin_unlock_bh(&xfrm_km_lock);
2152 EXPORT_SYMBOL(xfrm_unregister_km);
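/* A key manager (e.g. af_key or the xfrm netlink interface) plugs in with
 * a struct xfrm_mgr (sketch; the example_* callbacks are hypothetical):
 *
 *	static struct xfrm_mgr example_mgr = {
 *		.notify		= example_notify,
 *		.acquire	= example_acquire,
 *		.compile_policy	= example_compile_policy,
 *		.is_alive	= example_is_alive,
 *	};
 *
 *	xfrm_register_km(&example_mgr);
 *	...
 *	xfrm_unregister_km(&example_mgr);
 */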
2154 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
2158 if (WARN_ON(afinfo->family >= NPROTO))
2159 return -EAFNOSUPPORT;
2161 spin_lock_bh(&xfrm_state_afinfo_lock);
2162 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
2165 rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
2166 spin_unlock_bh(&xfrm_state_afinfo_lock);
2169 EXPORT_SYMBOL(xfrm_state_register_afinfo);
2171 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
2173 int err = 0, family = afinfo->family;
2175 if (WARN_ON(family >= NPROTO))
2176 return -EAFNOSUPPORT;
2178 spin_lock_bh(&xfrm_state_afinfo_lock);
2179 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
2180 if (rcu_access_pointer(xfrm_state_afinfo[family]) != afinfo)
2183 RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL);
2185 spin_unlock_bh(&xfrm_state_afinfo_lock);
2189 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
2191 struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family)
2193 if (unlikely(family >= NPROTO))
2196 return rcu_dereference(xfrm_state_afinfo[family]);
2199 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
2201 struct xfrm_state_afinfo *afinfo;
2202 if (unlikely(family >= NPROTO))
2205 afinfo = rcu_dereference(xfrm_state_afinfo[family]);
2206 if (unlikely(!afinfo))
2211 void xfrm_flush_gc(void)
2213 flush_work(&xfrm_state_gc_work);
2215 EXPORT_SYMBOL(xfrm_flush_gc);
2217 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
2218 void xfrm_state_delete_tunnel(struct xfrm_state *x)
2221 struct xfrm_state *t = x->tunnel;
2223 if (atomic_read(&t->tunnel_users) == 2)
2224 xfrm_state_delete(t);
2225 atomic_dec(&t->tunnel_users);
2226 xfrm_state_put_sync(t);
2230 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
2232 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
2234 const struct xfrm_type *type = READ_ONCE(x->type);
2236 if (x->km.state == XFRM_STATE_VALID &&
2237 type && type->get_mtu)
2238 return type->get_mtu(x, mtu);
2240 return mtu - x->props.header_len;
2243 int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
2245 struct xfrm_state_afinfo *afinfo;
2246 struct xfrm_mode *inner_mode;
2247 int family = x->props.family;
2250 err = -EAFNOSUPPORT;
2251 afinfo = xfrm_state_get_afinfo(family);
2256 if (afinfo->init_flags)
2257 err = afinfo->init_flags(x);
2264 err = -EPROTONOSUPPORT;
2266 if (x->sel.family != AF_UNSPEC) {
2267 inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
2268 if (inner_mode == NULL)
2271 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
2272 family != x->sel.family) {
2273 xfrm_put_mode(inner_mode);
2277 x->inner_mode = inner_mode;
2279 struct xfrm_mode *inner_mode_iaf;
2280 int iafamily = AF_INET;
2282 inner_mode = xfrm_get_mode(x->props.mode, x->props.family);
2283 if (inner_mode == NULL)
2286 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
2287 xfrm_put_mode(inner_mode);
2290 x->inner_mode = inner_mode;
2292 if (x->props.family == AF_INET)
2293 iafamily = AF_INET6;
2295 inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily);
2296 if (inner_mode_iaf) {
2297 if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)
2298 x->inner_mode_iaf = inner_mode_iaf;
2300 xfrm_put_mode(inner_mode_iaf);
2304 x->type = xfrm_get_type(x->id.proto, family);
2305 if (x->type == NULL)
2308 x->type_offload = xfrm_get_type_offload(x->id.proto, family, offload);
2310 err = x->type->init_state(x);
2314 x->outer_mode = xfrm_get_mode(x->props.mode, family);
2315 if (x->outer_mode == NULL) {
2316 err = -EPROTONOSUPPORT;
2321 err = xfrm_init_replay(x);
2330 EXPORT_SYMBOL(__xfrm_init_state);
2332 int xfrm_init_state(struct xfrm_state *x)
2336 err = __xfrm_init_state(x, true, false);
2338 x->km.state = XFRM_STATE_VALID;
2343 EXPORT_SYMBOL(xfrm_init_state);
2345 int __net_init xfrm_state_init(struct net *net)
2349 if (net_eq(net, &init_net))
2350 xfrm_state_cache = KMEM_CACHE(xfrm_state,
2351 SLAB_HWCACHE_ALIGN | SLAB_PANIC);
2353 INIT_LIST_HEAD(&net->xfrm.state_all);
2355 sz = sizeof(struct hlist_head) * 8;
2357 net->xfrm.state_bydst = xfrm_hash_alloc(sz);
2358 if (!net->xfrm.state_bydst)
2360 net->xfrm.state_bysrc = xfrm_hash_alloc(sz);
2361 if (!net->xfrm.state_bysrc)
2363 net->xfrm.state_byspi = xfrm_hash_alloc(sz);
2364 if (!net->xfrm.state_byspi)
2366 net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
2368 net->xfrm.state_num = 0;
2369 INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
2370 spin_lock_init(&net->xfrm.xfrm_state_lock);
2374 xfrm_hash_free(net->xfrm.state_bysrc, sz);
2376 xfrm_hash_free(net->xfrm.state_bydst, sz);
2381 void xfrm_state_fini(struct net *net)
2385 flush_work(&net->xfrm.state_hash_work);
2386 flush_work(&xfrm_state_gc_work);
2387 xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
2389 WARN_ON(!list_empty(&net->xfrm.state_all));
2391 sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
2392 WARN_ON(!hlist_empty(net->xfrm.state_byspi));
2393 xfrm_hash_free(net->xfrm.state_byspi, sz);
2394 WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
2395 xfrm_hash_free(net->xfrm.state_bysrc, sz);
2396 WARN_ON(!hlist_empty(net->xfrm.state_bydst));
2397 xfrm_hash_free(net->xfrm.state_bydst, sz);
2400 #ifdef CONFIG_AUDITSYSCALL
2401 static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
2402 struct audit_buffer *audit_buf)
2404 struct xfrm_sec_ctx *ctx = x->security;
2405 u32 spi = ntohl(x->id.spi);
2408 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2409 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2411 switch (x->props.family) {
2413 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2414 &x->props.saddr.a4, &x->id.daddr.a4);
2417 audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
2418 x->props.saddr.a6, x->id.daddr.a6);
2422 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2425 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
2426 struct audit_buffer *audit_buf)
2428 const struct iphdr *iph4;
2429 const struct ipv6hdr *iph6;
2434 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2435 &iph4->saddr, &iph4->daddr);
2438 iph6 = ipv6_hdr(skb);
2439 audit_log_format(audit_buf,
2440 " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
2441 &iph6->saddr, &iph6->daddr,
2442 iph6->flow_lbl[0] & 0x0f,
2449 void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid)
2451 struct audit_buffer *audit_buf;
2453 audit_buf = xfrm_audit_start("SAD-add");
2454 if (audit_buf == NULL)
2456 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2457 xfrm_audit_helper_sainfo(x, audit_buf);
2458 audit_log_format(audit_buf, " res=%u", result);
2459 audit_log_end(audit_buf);
2461 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2463 void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid)
2465 struct audit_buffer *audit_buf;
2467 audit_buf = xfrm_audit_start("SAD-delete");
2468 if (audit_buf == NULL)
2470 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2471 xfrm_audit_helper_sainfo(x, audit_buf);
2472 audit_log_format(audit_buf, " res=%u", result);
2473 audit_log_end(audit_buf);
2475 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2477 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2478 struct sk_buff *skb)
2480 struct audit_buffer *audit_buf;
2483 audit_buf = xfrm_audit_start("SA-replay-overflow");
2484 if (audit_buf == NULL)
2486 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2487 /* don't record the sequence number because it's inherent in this kind
2488 * of audit message */
2489 spi = ntohl(x->id.spi);
2490 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2491 audit_log_end(audit_buf);
2493 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
2495 void xfrm_audit_state_replay(struct xfrm_state *x,
2496 struct sk_buff *skb, __be32 net_seq)
2498 struct audit_buffer *audit_buf;
2501 audit_buf = xfrm_audit_start("SA-replayed-pkt");
2502 if (audit_buf == NULL)
2504 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2505 spi = ntohl(x->id.spi);
2506 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2507 spi, spi, ntohl(net_seq));
2508 audit_log_end(audit_buf);
2510 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay);
2512 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2514 struct audit_buffer *audit_buf;
2516 audit_buf = xfrm_audit_start("SA-notfound");
2517 if (audit_buf == NULL)
2519 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2520 audit_log_end(audit_buf);
2522 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
2524 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2525 __be32 net_spi, __be32 net_seq)
2527 struct audit_buffer *audit_buf;
2530 audit_buf = xfrm_audit_start("SA-notfound");
2531 if (audit_buf == NULL)
2533 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2534 spi = ntohl(net_spi);
2535 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2536 spi, spi, ntohl(net_seq));
2537 audit_log_end(audit_buf);
2539 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
2541 void xfrm_audit_state_icvfail(struct xfrm_state *x,
2542 struct sk_buff *skb, u8 proto)
2544 struct audit_buffer *audit_buf;
2548 audit_buf = xfrm_audit_start("SA-icv-failure");
2549 if (audit_buf == NULL)
2551 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2552 if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2553 u32 spi = ntohl(net_spi);
2554 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2555 spi, spi, ntohl(net_seq));
2557 audit_log_end(audit_buf);
2559 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2560 #endif /* CONFIG_AUDITSYSCALL */