6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * Kazunori MIYAZAWA @USAGI
11 * Split up af-specific portion
12 * Derek Atkins <derek@ihtfp.com> Add the post_input processor
16 #include <linux/err.h>
17 #include <linux/slab.h>
18 #include <linux/kmod.h>
19 #include <linux/list.h>
20 #include <linux/spinlock.h>
21 #include <linux/workqueue.h>
22 #include <linux/notifier.h>
23 #include <linux/netdevice.h>
24 #include <linux/netfilter.h>
25 #include <linux/module.h>
26 #include <linux/cache.h>
27 #include <linux/audit.h>
32 #ifdef CONFIG_XFRM_STATISTICS
36 #include "xfrm_hash.h"
38 #define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
39 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
40 #define XFRM_MAX_QUEUE_LEN 100
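/* The hold-queue timer starts at XFRM_QUEUE_TMO_MIN and is doubled on
 * each unsuccessful retry; once it would exceed XFRM_QUEUE_TMO_MAX the
 * queue is purged (see xfrm_policy_queue_process() below), and
 * xdst_queue_output() drops packets once XFRM_MAX_QUEUE_LEN is exceeded.
 */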
42 static struct dst_entry *xfrm_policy_sk_bundles;
44 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
45 static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
48 static struct kmem_cache *xfrm_dst_cache __read_mostly;
50 static void xfrm_init_pmtu(struct dst_entry *dst);
51 static int stale_bundle(struct dst_entry *dst);
52 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
53 static void xfrm_policy_queue_process(unsigned long arg);
55 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
59 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
61 const struct flowi4 *fl4 = &fl->u.ip4;
63 return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
64 addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
65 !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
66 !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
67 (fl4->flowi4_proto == sel->proto || !sel->proto) &&
68 (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
72 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
74 const struct flowi6 *fl6 = &fl->u.ip6;
76 return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
77 addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
78 !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
79 !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
80 (fl6->flowi6_proto == sel->proto || !sel->proto) &&
81 (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
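/* Both matchers rely on the XOR/mask idiom for ports:
 * !((port ^ sel->port) & sel->port_mask) is true iff the bits covered
 * by the mask agree, so a mask of 0 wildcards the port entirely while
 * an all-ones mask requires an exact match.
 */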
84 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
85 unsigned short family)
89 return __xfrm4_selector_match(sel, fl);
91 return __xfrm6_selector_match(sel, fl);
96 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
98 struct xfrm_policy_afinfo *afinfo;
100 if (unlikely(family >= NPROTO))
103 afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
104 if (unlikely(!afinfo))
109 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
114 static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
115 const xfrm_address_t *saddr,
116 const xfrm_address_t *daddr,
119 struct xfrm_policy_afinfo *afinfo;
120 struct dst_entry *dst;
122 afinfo = xfrm_policy_get_afinfo(family);
123 if (unlikely(afinfo == NULL))
124 return ERR_PTR(-EAFNOSUPPORT);
126 dst = afinfo->dst_lookup(net, tos, saddr, daddr);
128 xfrm_policy_put_afinfo(afinfo);
133 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
134 xfrm_address_t *prev_saddr,
135 xfrm_address_t *prev_daddr,
138 struct net *net = xs_net(x);
139 xfrm_address_t *saddr = &x->props.saddr;
140 xfrm_address_t *daddr = &x->id.daddr;
141 struct dst_entry *dst;
143 if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
147 if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
152 dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);
155 if (prev_saddr != saddr)
156 memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
157 if (prev_daddr != daddr)
158 memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
164 static inline unsigned long make_jiffies(long secs)
166 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
167 return MAX_SCHEDULE_TIMEOUT-1;
172 static void xfrm_policy_timer(unsigned long data)
174 struct xfrm_policy *xp = (struct xfrm_policy*)data;
175 unsigned long now = get_seconds();
176 long next = LONG_MAX;
180 read_lock(&xp->lock);
182 if (unlikely(xp->walk.dead))
185 dir = xfrm_policy_id2dir(xp->index);
187 if (xp->lft.hard_add_expires_seconds) {
188 long tmo = xp->lft.hard_add_expires_seconds +
189 xp->curlft.add_time - now;
195 if (xp->lft.hard_use_expires_seconds) {
196 long tmo = xp->lft.hard_use_expires_seconds +
197 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
203 if (xp->lft.soft_add_expires_seconds) {
204 long tmo = xp->lft.soft_add_expires_seconds +
205 xp->curlft.add_time - now;
208 tmo = XFRM_KM_TIMEOUT;
213 if (xp->lft.soft_use_expires_seconds) {
214 long tmo = xp->lft.soft_use_expires_seconds +
215 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
218 tmo = XFRM_KM_TIMEOUT;
225 km_policy_expired(xp, dir, 0, 0);
226 if (next != LONG_MAX &&
227 !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
231 read_unlock(&xp->lock);
236 read_unlock(&xp->lock);
237 if (!xfrm_policy_delete(xp, dir))
238 km_policy_expired(xp, dir, 1, 0);
242 static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
244 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
246 if (unlikely(pol->walk.dead))
254 static int xfrm_policy_flo_check(struct flow_cache_object *flo)
256 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
258 return !pol->walk.dead;
261 static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
263 xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
266 static const struct flow_cache_ops xfrm_policy_fc_ops = {
267 .get = xfrm_policy_flo_get,
268 .check = xfrm_policy_flo_check,
269 .delete = xfrm_policy_flo_delete,
272 /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2 SPD calls. */
276 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
278 struct xfrm_policy *policy;
280 policy = kzalloc(sizeof(struct xfrm_policy), gfp);
283 write_pnet(&policy->xp_net, net);
284 INIT_LIST_HEAD(&policy->walk.all);
285 INIT_HLIST_NODE(&policy->bydst);
286 INIT_HLIST_NODE(&policy->byidx);
287 rwlock_init(&policy->lock);
288 atomic_set(&policy->refcnt, 1);
289 skb_queue_head_init(&policy->polq.hold_queue);
290 setup_timer(&policy->timer, xfrm_policy_timer,
291 (unsigned long)policy);
292 setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process,
293 (unsigned long)policy);
294 policy->flo.ops = &xfrm_policy_fc_ops;
298 EXPORT_SYMBOL(xfrm_policy_alloc);
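/* A minimal usage sketch (hypothetical caller, GFP_KERNEL context
 * assumed, error handling elided):
 *
 *	struct xfrm_policy *pol = xfrm_policy_alloc(net, GFP_KERNEL);
 *
 *	if (!pol)
 *		return -ENOMEM;
 *	... fill in pol->selector, pol->lft, pol->action, then insert it
 *	with xfrm_policy_insert() ...
 */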
300 /* Destroy xfrm_policy: descendant resources must have been released by this point. */
302 void xfrm_policy_destroy(struct xfrm_policy *policy)
304 BUG_ON(!policy->walk.dead);
306 if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
309 security_xfrm_policy_free(policy->security);
312 EXPORT_SYMBOL(xfrm_policy_destroy);
314 static void xfrm_queue_purge(struct sk_buff_head *list)
318 while ((skb = skb_dequeue(list)) != NULL)
322 /* Rule must be locked. Release descendant resources, announce
323  * the entry dead. The rule must already be unlinked from all lists.
326 static void xfrm_policy_kill(struct xfrm_policy *policy)
328 policy->walk.dead = 1;
330 atomic_inc(&policy->genid);
332 if (del_timer(&policy->polq.hold_timer))
333 xfrm_pol_put(policy);
334 xfrm_queue_purge(&policy->polq.hold_queue);
336 if (del_timer(&policy->timer))
337 xfrm_pol_put(policy);
339 xfrm_pol_put(policy);
342 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
344 static inline unsigned int idx_hash(struct net *net, u32 index)
346 return __idx_hash(index, net->xfrm.policy_idx_hmask);
349 static struct hlist_head *policy_hash_bysel(struct net *net,
350 const struct xfrm_selector *sel,
351 unsigned short family, int dir)
353 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
354 unsigned int hash = __sel_hash(sel, family, hmask);
356 return (hash == hmask + 1 ?
357 &net->xfrm.policy_inexact[dir] :
358 net->xfrm.policy_bydst[dir].table + hash);
361 static struct hlist_head *policy_hash_direct(struct net *net,
362 const xfrm_address_t *daddr,
363 const xfrm_address_t *saddr,
364 unsigned short family, int dir)
366 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
367 unsigned int hash = __addr_hash(daddr, saddr, family, hmask);
369 return net->xfrm.policy_bydst[dir].table + hash;
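/* Selectors that __sel_hash() cannot hash (e.g. address prefixes that
 * are not full length - see xfrm_hash.h) come back as hmask + 1, which
 * is why policy_hash_bysel() above diverts them to the per-direction
 * policy_inexact list instead of the exact-match table.
 */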
372 static void xfrm_dst_hash_transfer(struct hlist_head *list,
373 struct hlist_head *ndsttable,
374 unsigned int nhashmask)
376 struct hlist_node *tmp, *entry0 = NULL;
377 struct xfrm_policy *pol;
381 hlist_for_each_entry_safe(pol, tmp, list, bydst) {
384 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
385 pol->family, nhashmask);
387 hlist_del(&pol->bydst);
388 hlist_add_head(&pol->bydst, ndsttable+h);
393 hlist_del(&pol->bydst);
394 hlist_add_after(entry0, &pol->bydst);
396 entry0 = &pol->bydst;
398 if (!hlist_empty(list)) {
404 static void xfrm_idx_hash_transfer(struct hlist_head *list,
405 struct hlist_head *nidxtable,
406 unsigned int nhashmask)
408 struct hlist_node *tmp;
409 struct xfrm_policy *pol;
411 hlist_for_each_entry_safe(pol, tmp, list, byidx) {
414 h = __idx_hash(pol->index, nhashmask);
415 hlist_add_head(&pol->byidx, nidxtable+h);
419 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
421 return ((old_hmask + 1) << 1) - 1;
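/* Each resize doubles the table: e.g. an old hmask of 7 (8 buckets)
 * yields ((7 + 1) << 1) - 1 = 15, i.e. 16 buckets.
 */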
424 static void xfrm_bydst_resize(struct net *net, int dir)
426 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
427 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
428 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
429 struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
430 struct hlist_head *ndst = xfrm_hash_alloc(nsize);
436 write_lock_bh(&net->xfrm.xfrm_policy_lock);
438 for (i = hmask; i >= 0; i--)
439 xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);
441 net->xfrm.policy_bydst[dir].table = ndst;
442 net->xfrm.policy_bydst[dir].hmask = nhashmask;
444 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
446 xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
449 static void xfrm_byidx_resize(struct net *net, int total)
451 unsigned int hmask = net->xfrm.policy_idx_hmask;
452 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
453 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
454 struct hlist_head *oidx = net->xfrm.policy_byidx;
455 struct hlist_head *nidx = xfrm_hash_alloc(nsize);
461 write_lock_bh(&net->xfrm.xfrm_policy_lock);
463 for (i = hmask; i >= 0; i--)
464 xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
466 net->xfrm.policy_byidx = nidx;
467 net->xfrm.policy_idx_hmask = nhashmask;
469 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
471 xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
474 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
476 unsigned int cnt = net->xfrm.policy_count[dir];
477 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
482 if ((hmask + 1) < xfrm_policy_hashmax &&
489 static inline int xfrm_byidx_should_resize(struct net *net, int total)
491 unsigned int hmask = net->xfrm.policy_idx_hmask;
493 if ((hmask + 1) < xfrm_policy_hashmax &&
500 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
502 read_lock_bh(&net->xfrm.xfrm_policy_lock);
503 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
504 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
505 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
506 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
507 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
508 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
509 si->spdhcnt = net->xfrm.policy_idx_hmask;
510 si->spdhmcnt = xfrm_policy_hashmax;
511 read_unlock_bh(&net->xfrm.xfrm_policy_lock);
513 EXPORT_SYMBOL(xfrm_spd_getinfo);
515 static DEFINE_MUTEX(hash_resize_mutex);
516 static void xfrm_hash_resize(struct work_struct *work)
518 struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
521 mutex_lock(&hash_resize_mutex);
524 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
525 if (xfrm_bydst_should_resize(net, dir, &total))
526 xfrm_bydst_resize(net, dir);
528 if (xfrm_byidx_should_resize(net, total))
529 xfrm_byidx_resize(net, total);
531 mutex_unlock(&hash_resize_mutex);
534 /* Generate new index... KAME seems to generate them ordered by cost
535  * of an absolute unpredictability of ordering of rules. This will not pass. */
536 static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
538 static u32 idx_generator;
541 struct hlist_head *list;
542 struct xfrm_policy *p;
547 idx = (idx_generator | dir);
556 list = net->xfrm.policy_byidx + idx_hash(net, idx);
558 hlist_for_each_entry(p, list, byidx) {
559 if (p->index == idx) {
569 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
571 u32 *p1 = (u32 *) s1;
572 u32 *p2 = (u32 *) s2;
573 int len = sizeof(struct xfrm_selector) / sizeof(u32);
576 for (i = 0; i < len; i++) {
584 static void xfrm_policy_requeue(struct xfrm_policy *old,
585 struct xfrm_policy *new)
587 struct xfrm_policy_queue *pq = &old->polq;
588 struct sk_buff_head list;
590 __skb_queue_head_init(&list);
592 spin_lock_bh(&pq->hold_queue.lock);
593 skb_queue_splice_init(&pq->hold_queue, &list);
594 if (del_timer(&pq->hold_timer))
596 spin_unlock_bh(&pq->hold_queue.lock);
598 if (skb_queue_empty(&list))
603 spin_lock_bh(&pq->hold_queue.lock);
604 skb_queue_splice(&list, &pq->hold_queue);
605 pq->timeout = XFRM_QUEUE_TMO_MIN;
606 if (!mod_timer(&pq->hold_timer, jiffies))
608 spin_unlock_bh(&pq->hold_queue.lock);
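/* xfrm_policy_requeue() migrates any packets parked on the old
 * policy's hold queue over to the replacement policy and rearms the
 * hold timer so they get re-dispatched under the new policy.
 */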
611 static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
612 struct xfrm_policy *pol)
614 u32 mark = policy->mark.v & policy->mark.m;
616 if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
619 if ((mark & pol->mark.m) == pol->mark.v &&
620 policy->priority == pol->priority)
626 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
628 struct net *net = xp_net(policy);
629 struct xfrm_policy *pol;
630 struct xfrm_policy *delpol;
631 struct hlist_head *chain;
632 struct hlist_node *newpos;
634 write_lock_bh(&net->xfrm.xfrm_policy_lock);
635 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
638 hlist_for_each_entry(pol, chain, bydst) {
639 if (pol->type == policy->type &&
640 !selector_cmp(&pol->selector, &policy->selector) &&
641 xfrm_policy_mark_match(policy, pol) &&
642 xfrm_sec_ctx_match(pol->security, policy->security) &&
645 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
649 if (policy->priority > pol->priority)
651 } else if (policy->priority >= pol->priority) {
652 newpos = &pol->bydst;
659 hlist_add_after(newpos, &policy->bydst);
661 hlist_add_head(&policy->bydst, chain);
662 xfrm_pol_hold(policy);
663 net->xfrm.policy_count[dir]++;
664 atomic_inc(&flow_cache_genid);
666 /* After the checks above, family can only be AF_INET or AF_INET6 */
667 if (policy->family == AF_INET)
668 rt_genid_bump_ipv4(net);
670 rt_genid_bump_ipv6(net);
673 xfrm_policy_requeue(delpol, policy);
674 __xfrm_policy_unlink(delpol, dir);
676 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
677 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
678 policy->curlft.add_time = get_seconds();
679 policy->curlft.use_time = 0;
680 if (!mod_timer(&policy->timer, jiffies + HZ))
681 xfrm_pol_hold(policy);
682 list_add(&policy->walk.all, &net->xfrm.policy_all);
683 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
686 xfrm_policy_kill(delpol);
687 else if (xfrm_bydst_should_resize(net, dir, NULL))
688 schedule_work(&net->xfrm.policy_hash_work);
692 EXPORT_SYMBOL(xfrm_policy_insert);
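/* Insertion keeps each hash chain sorted by ->priority and bumps the
 * flow cache genid plus the IPv4/IPv6 route genid, so cached flow and
 * route decisions are revalidated against the updated SPD.
 */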
694 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
695 int dir, struct xfrm_selector *sel,
696 struct xfrm_sec_ctx *ctx, int delete,
699 struct xfrm_policy *pol, *ret;
700 struct hlist_head *chain;
703 write_lock_bh(&net->xfrm.xfrm_policy_lock);
704 chain = policy_hash_bysel(net, sel, sel->family, dir);
706 hlist_for_each_entry(pol, chain, bydst) {
707 if (pol->type == type &&
708 (mark & pol->mark.m) == pol->mark.v &&
709 !selector_cmp(sel, &pol->selector) &&
710 xfrm_sec_ctx_match(ctx, pol->security)) {
713 *err = security_xfrm_policy_delete(
716 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
719 __xfrm_policy_unlink(pol, dir);
725 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
728 xfrm_policy_kill(ret);
731 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
733 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
734 int dir, u32 id, int delete, int *err)
736 struct xfrm_policy *pol, *ret;
737 struct hlist_head *chain;
740 if (xfrm_policy_id2dir(id) != dir)
744 write_lock_bh(&net->xfrm.xfrm_policy_lock);
745 chain = net->xfrm.policy_byidx + idx_hash(net, id);
747 hlist_for_each_entry(pol, chain, byidx) {
748 if (pol->type == type && pol->index == id &&
749 (mark & pol->mark.m) == pol->mark.v) {
752 *err = security_xfrm_policy_delete(
755 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
758 __xfrm_policy_unlink(pol, dir);
764 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
767 xfrm_policy_kill(ret);
770 EXPORT_SYMBOL(xfrm_policy_byid);
772 #ifdef CONFIG_SECURITY_NETWORK_XFRM
774 xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
778 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
779 struct xfrm_policy *pol;
782 hlist_for_each_entry(pol,
783 &net->xfrm.policy_inexact[dir], bydst) {
784 if (pol->type != type)
786 err = security_xfrm_policy_delete(pol->security);
788 xfrm_audit_policy_delete(pol, 0,
789 audit_info->loginuid,
790 audit_info->sessionid,
795 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
796 hlist_for_each_entry(pol,
797 net->xfrm.policy_bydst[dir].table + i,
799 if (pol->type != type)
801 err = security_xfrm_policy_delete(
804 xfrm_audit_policy_delete(pol, 0,
805 audit_info->loginuid,
806 audit_info->sessionid,
817 xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
823 int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
825 int dir, err = 0, cnt = 0;
827 write_lock_bh(&net->xfrm.xfrm_policy_lock);
829 err = xfrm_policy_flush_secctx_check(net, type, audit_info);
833 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
834 struct xfrm_policy *pol;
838 hlist_for_each_entry(pol,
839 &net->xfrm.policy_inexact[dir], bydst) {
840 if (pol->type != type)
842 __xfrm_policy_unlink(pol, dir);
843 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
846 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
847 audit_info->sessionid,
850 xfrm_policy_kill(pol);
852 write_lock_bh(&net->xfrm.xfrm_policy_lock);
856 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
858 hlist_for_each_entry(pol,
859 net->xfrm.policy_bydst[dir].table + i,
861 if (pol->type != type)
863 __xfrm_policy_unlink(pol, dir);
864 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
867 xfrm_audit_policy_delete(pol, 1,
868 audit_info->loginuid,
869 audit_info->sessionid,
871 xfrm_policy_kill(pol);
873 write_lock_bh(&net->xfrm.xfrm_policy_lock);
882 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
885 EXPORT_SYMBOL(xfrm_policy_flush);
887 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
888 int (*func)(struct xfrm_policy *, int, int, void*),
891 struct xfrm_policy *pol;
892 struct xfrm_policy_walk_entry *x;
895 if (walk->type >= XFRM_POLICY_TYPE_MAX &&
896 walk->type != XFRM_POLICY_TYPE_ANY)
899 if (list_empty(&walk->walk.all) && walk->seq != 0)
902 write_lock_bh(&net->xfrm.xfrm_policy_lock);
903 if (list_empty(&walk->walk.all))
904 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
906 x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
907 list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
910 pol = container_of(x, struct xfrm_policy, walk);
911 if (walk->type != XFRM_POLICY_TYPE_ANY &&
912 walk->type != pol->type)
914 error = func(pol, xfrm_policy_id2dir(pol->index),
917 list_move_tail(&walk->walk.all, &x->all);
922 if (walk->seq == 0) {
926 list_del_init(&walk->walk.all);
928 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
931 EXPORT_SYMBOL(xfrm_policy_walk);
933 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
935 INIT_LIST_HEAD(&walk->walk.all);
940 EXPORT_SYMBOL(xfrm_policy_walk_init);
942 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
944 if (list_empty(&walk->walk.all))
947 write_lock_bh(&net->xfrm.xfrm_policy_lock);
948 list_del(&walk->walk.all);
949 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
951 EXPORT_SYMBOL(xfrm_policy_walk_done);
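/* Typical walker usage (sketch; dump_one is a hypothetical callback
 * with the int (*)(struct xfrm_policy *, int, int, void *) signature
 * taken by xfrm_policy_walk() above, data an opaque cookie for it):
 *
 *	struct xfrm_policy_walk walk;
 *	int err;
 *
 *	xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_ANY);
 *	err = xfrm_policy_walk(net, &walk, dump_one, data);
 *	xfrm_policy_walk_done(&walk, net);
 */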
954 * Find policy to apply to this flow.
956  * Returns 0 if a policy is found, else an -errno.
958 static int xfrm_policy_match(const struct xfrm_policy *pol,
959 const struct flowi *fl,
960 u8 type, u16 family, int dir)
962 const struct xfrm_selector *sel = &pol->selector;
966 if (pol->family != family ||
967 (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
971 match = xfrm_selector_match(sel, fl, family);
973 ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
979 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
980 const struct flowi *fl,
984 struct xfrm_policy *pol, *ret;
985 const xfrm_address_t *daddr, *saddr;
986 struct hlist_head *chain;
989 daddr = xfrm_flowi_daddr(fl, family);
990 saddr = xfrm_flowi_saddr(fl, family);
991 if (unlikely(!daddr || !saddr))
994 read_lock_bh(&net->xfrm.xfrm_policy_lock);
995 chain = policy_hash_direct(net, daddr, saddr, family, dir);
997 hlist_for_each_entry(pol, chain, bydst) {
998 err = xfrm_policy_match(pol, fl, type, family, dir);
1008 priority = ret->priority;
1012 chain = &net->xfrm.policy_inexact[dir];
1013 hlist_for_each_entry(pol, chain, bydst) {
1014 err = xfrm_policy_match(pol, fl, type, family, dir);
1022 } else if (pol->priority < priority) {
1030 read_unlock_bh(&net->xfrm.xfrm_policy_lock);
1035 static struct xfrm_policy *
1036 __xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
1038 #ifdef CONFIG_XFRM_SUB_POLICY
1039 struct xfrm_policy *pol;
1041 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
1045 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
1048 static int flow_to_policy_dir(int dir)
1050 if (XFRM_POLICY_IN == FLOW_DIR_IN &&
1051 XFRM_POLICY_OUT == FLOW_DIR_OUT &&
1052 XFRM_POLICY_FWD == FLOW_DIR_FWD)
1058 return XFRM_POLICY_IN;
1060 return XFRM_POLICY_OUT;
1062 return XFRM_POLICY_FWD;
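/* When the XFRM_POLICY_* and FLOW_DIR_* constants coincide, as they
 * currently do, the compile-time test above reduces both this helper
 * and policy_to_flow_dir() below to a plain "return dir"; the switch
 * is only the fallback.
 */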
1066 static struct flow_cache_object *
1067 xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
1068 u8 dir, struct flow_cache_object *old_obj, void *ctx)
1070 struct xfrm_policy *pol;
1073 xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));
1075 pol = __xfrm_policy_lookup(net, fl, family, flow_to_policy_dir(dir));
1076 if (IS_ERR_OR_NULL(pol))
1077 return ERR_CAST(pol);
1079 /* Resolver returns two references:
1080  * one for the cache and one for the caller of flow_cache_lookup() */
1086 static inline int policy_to_flow_dir(int dir)
1088 if (XFRM_POLICY_IN == FLOW_DIR_IN &&
1089 XFRM_POLICY_OUT == FLOW_DIR_OUT &&
1090 XFRM_POLICY_FWD == FLOW_DIR_FWD)
1094 case XFRM_POLICY_IN:
1096 case XFRM_POLICY_OUT:
1097 return FLOW_DIR_OUT;
1098 case XFRM_POLICY_FWD:
1099 return FLOW_DIR_FWD;
1103 static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
1104 const struct flowi *fl)
1106 struct xfrm_policy *pol;
1107 struct net *net = sock_net(sk);
1109 read_lock_bh(&net->xfrm.xfrm_policy_lock);
1110 if ((pol = sk->sk_policy[dir]) != NULL) {
1111 bool match = xfrm_selector_match(&pol->selector, fl,
1116 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
1120 err = security_xfrm_policy_lookup(pol->security,
1122 policy_to_flow_dir(dir));
1125 else if (err == -ESRCH)
1133 read_unlock_bh(&net->xfrm.xfrm_policy_lock);
1137 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
1139 struct net *net = xp_net(pol);
1140 struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
1143 list_add(&pol->walk.all, &net->xfrm.policy_all);
1144 hlist_add_head(&pol->bydst, chain);
1145 hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
1146 net->xfrm.policy_count[dir]++;
1149 if (xfrm_bydst_should_resize(net, dir, NULL))
1150 schedule_work(&net->xfrm.policy_hash_work);
1153 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
1156 struct net *net = xp_net(pol);
1158 if (hlist_unhashed(&pol->bydst))
1161 hlist_del(&pol->bydst);
1162 hlist_del(&pol->byidx);
1163 list_del(&pol->walk.all);
1164 net->xfrm.policy_count[dir]--;
1169 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
1171 struct net *net = xp_net(pol);
1173 write_lock_bh(&net->xfrm.xfrm_policy_lock);
1174 pol = __xfrm_policy_unlink(pol, dir);
1175 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1177 xfrm_policy_kill(pol);
1182 EXPORT_SYMBOL(xfrm_policy_delete);
1184 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1186 struct net *net = xp_net(pol);
1187 struct xfrm_policy *old_pol;
1189 #ifdef CONFIG_XFRM_SUB_POLICY
1190 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
1194 write_lock_bh(&net->xfrm.xfrm_policy_lock);
1195 old_pol = sk->sk_policy[dir];
1196 sk->sk_policy[dir] = pol;
1198 pol->curlft.add_time = get_seconds();
1199 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
1200 __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
1204 xfrm_policy_requeue(old_pol, pol);
1206 /* Unlinking always succeeds. This is the only function
1207  * allowed to delete or replace a socket policy.
1209 __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
1211 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1214 xfrm_policy_kill(old_pol);
1219 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
1221 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
1222 struct net *net = xp_net(old);
1225 newp->selector = old->selector;
1226 if (security_xfrm_policy_clone(old->security,
1229 return NULL; /* ENOMEM */
1231 newp->lft = old->lft;
1232 newp->curlft = old->curlft;
1233 newp->mark = old->mark;
1234 newp->action = old->action;
1235 newp->flags = old->flags;
1236 newp->xfrm_nr = old->xfrm_nr;
1237 newp->index = old->index;
1238 newp->type = old->type;
1239 memcpy(newp->xfrm_vec, old->xfrm_vec,
1240 newp->xfrm_nr*sizeof(struct xfrm_tmpl));
1241 write_lock_bh(&net->xfrm.xfrm_policy_lock);
1242 __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
1243 write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1249 int __xfrm_sk_clone_policy(struct sock *sk)
1251 struct xfrm_policy *p0 = sk->sk_policy[0],
1252 *p1 = sk->sk_policy[1];
1254 sk->sk_policy[0] = sk->sk_policy[1] = NULL;
1255 if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
1257 if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
1263 xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
1264 unsigned short family)
1267 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1269 if (unlikely(afinfo == NULL))
1271 err = afinfo->get_saddr(net, local, remote);
1272 xfrm_policy_put_afinfo(afinfo);
1276 /* Resolve list of templates for the flow, given policy. */
1279 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
1280 struct xfrm_state **xfrm, unsigned short family)
1282 struct net *net = xp_net(policy);
1285 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
1286 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
1289 for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
1290 struct xfrm_state *x;
1291 xfrm_address_t *remote = daddr;
1292 xfrm_address_t *local = saddr;
1293 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
1295 if (tmpl->mode == XFRM_MODE_TUNNEL ||
1296 tmpl->mode == XFRM_MODE_BEET) {
1297 remote = &tmpl->id.daddr;
1298 local = &tmpl->saddr;
1299 if (xfrm_addr_any(local, tmpl->encap_family)) {
1300 error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
1307 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
1309 if (x && x->km.state == XFRM_STATE_VALID) {
1316 error = (x->km.state == XFRM_STATE_ERROR ?
1320 else if (error == -ESRCH)
1323 if (!tmpl->optional)
1329 for (nx--; nx>=0; nx--)
1330 xfrm_state_put(xfrm[nx]);
1335 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
1336 struct xfrm_state **xfrm, unsigned short family)
1338 struct xfrm_state *tp[XFRM_MAX_DEPTH];
1339 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
1345 for (i = 0; i < npols; i++) {
1346 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
1351 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
1359 /* Found states are sorted for outbound processing */
1361 xfrm_state_sort(xfrm, tpp, cnx, family);
1366 for (cnx--; cnx>=0; cnx--)
1367 xfrm_state_put(tpp[cnx]);
1372 /* Check that the bundle accepts the flow and its components are still valid. */
1376 static inline int xfrm_get_tos(const struct flowi *fl, int family)
1378 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1384 tos = afinfo->get_tos(fl);
1386 xfrm_policy_put_afinfo(afinfo);
1391 static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
1393 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1394 struct dst_entry *dst = &xdst->u.dst;
1396 if (xdst->route == NULL) {
1397 /* Dummy bundle - if it has xfrms, we were not
1398  * able to build a bundle because template resolution
1399  * failed. It means we need to retry resolving. */
1400 if (xdst->num_xfrms > 0)
1402 } else if (dst->flags & DST_XFRM_QUEUE) {
1406 if (stale_bundle(dst))
1414 static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
1416 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1417 struct dst_entry *dst = &xdst->u.dst;
1421 if (stale_bundle(dst))
1427 static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
1429 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1430 struct dst_entry *dst = &xdst->u.dst;
1435 static const struct flow_cache_ops xfrm_bundle_fc_ops = {
1436 .get = xfrm_bundle_flo_get,
1437 .check = xfrm_bundle_flo_check,
1438 .delete = xfrm_bundle_flo_delete,
1441 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1443 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1444 struct dst_ops *dst_ops;
1445 struct xfrm_dst *xdst;
1448 return ERR_PTR(-EINVAL);
1452 dst_ops = &net->xfrm.xfrm4_dst_ops;
1454 #if IS_ENABLED(CONFIG_IPV6)
1456 dst_ops = &net->xfrm.xfrm6_dst_ops;
1462 xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);
1465 struct dst_entry *dst = &xdst->u.dst;
1467 memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
1468 xdst->flo.ops = &xfrm_bundle_fc_ops;
1469 if (afinfo->init_dst)
1470 afinfo->init_dst(net, xdst);
1472 xdst = ERR_PTR(-ENOBUFS);
1474 xfrm_policy_put_afinfo(afinfo);
1479 static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
1482 struct xfrm_policy_afinfo *afinfo =
1483 xfrm_policy_get_afinfo(dst->ops->family);
1489 err = afinfo->init_path(path, dst, nfheader_len);
1491 xfrm_policy_put_afinfo(afinfo);
1496 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1497 const struct flowi *fl)
1499 struct xfrm_policy_afinfo *afinfo =
1500 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
1506 err = afinfo->fill_dst(xdst, dev, fl);
1508 xfrm_policy_put_afinfo(afinfo);
1514 /* Allocate a chain of dst_entry's, attach known xfrm's, calculate
1515  * all the metrics... In short, bundle a bundle.
1518 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1519 struct xfrm_state **xfrm, int nx,
1520 const struct flowi *fl,
1521 struct dst_entry *dst)
1523 struct net *net = xp_net(policy);
1524 unsigned long now = jiffies;
1525 struct net_device *dev;
1526 struct xfrm_mode *inner_mode;
1527 struct dst_entry *dst_prev = NULL;
1528 struct dst_entry *dst0 = NULL;
1532 int nfheader_len = 0;
1533 int trailer_len = 0;
1535 int family = policy->selector.family;
1536 xfrm_address_t saddr, daddr;
1538 xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
1540 tos = xfrm_get_tos(fl, family);
1547 for (; i < nx; i++) {
1548 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
1549 struct dst_entry *dst1 = &xdst->u.dst;
1551 err = PTR_ERR(xdst);
1557 if (xfrm[i]->sel.family == AF_UNSPEC) {
1558 inner_mode = xfrm_ip2inner_mode(xfrm[i],
1559 xfrm_af2proto(family));
1561 err = -EAFNOSUPPORT;
1566 inner_mode = xfrm[i]->inner_mode;
1571 dst_prev->child = dst_clone(dst1);
1572 dst1->flags |= DST_NOHASH;
1576 dst_copy_metrics(dst1, dst);
1578 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1579 family = xfrm[i]->props.family;
1580 dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
1588 dst1->xfrm = xfrm[i];
1589 xdst->xfrm_genid = xfrm[i]->genid;
1591 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1592 dst1->flags |= DST_HOST;
1593 dst1->lastuse = now;
1595 dst1->input = dst_discard;
1596 dst1->output = inner_mode->afinfo->output;
1598 dst1->next = dst_prev;
1601 header_len += xfrm[i]->props.header_len;
1602 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
1603 nfheader_len += xfrm[i]->props.header_len;
1604 trailer_len += xfrm[i]->props.trailer_len;
1607 dst_prev->child = dst;
1615 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
1616 xfrm_init_pmtu(dst_prev);
1618 for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
1619 struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
1621 err = xfrm_fill_dst(xdst, dev, fl);
1625 dst_prev->header_len = header_len;
1626 dst_prev->trailer_len = trailer_len;
1627 header_len -= xdst->u.dst.xfrm->props.header_len;
1628 trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
1636 xfrm_state_put(xfrm[i]);
1640 dst0 = ERR_PTR(err);
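/* In the fill pass above, header_len/trailer_len start out as the sum
 * over all states, and each level's contribution is subtracted as the
 * chain is walked outermost-in, so every dst records the header and
 * trailer space still required by it and the transforms beneath it.
 */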
1645 xfrm_dst_alloc_copy(void **target, const void *src, int size)
1648 *target = kmalloc(size, GFP_ATOMIC);
1652 memcpy(*target, src, size);
1657 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
1659 #ifdef CONFIG_XFRM_SUB_POLICY
1660 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1661 return xfrm_dst_alloc_copy((void **)&(xdst->partner),
1669 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
1671 #ifdef CONFIG_XFRM_SUB_POLICY
1672 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1673 return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
1679 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
1680 struct xfrm_policy **pols,
1681 int *num_pols, int *num_xfrms)
1685 if (*num_pols == 0 || !pols[0]) {
1690 if (IS_ERR(pols[0]))
1691 return PTR_ERR(pols[0]);
1693 *num_xfrms = pols[0]->xfrm_nr;
1695 #ifdef CONFIG_XFRM_SUB_POLICY
1696 if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
1697 pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1698 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
1699 XFRM_POLICY_TYPE_MAIN,
1703 if (IS_ERR(pols[1])) {
1704 xfrm_pols_put(pols, *num_pols);
1705 return PTR_ERR(pols[1]);
1708 (*num_xfrms) += pols[1]->xfrm_nr;
1712 for (i = 0; i < *num_pols; i++) {
1713 if (pols[i]->action != XFRM_POLICY_ALLOW) {
1723 static struct xfrm_dst *
1724 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1725 const struct flowi *fl, u16 family,
1726 struct dst_entry *dst_orig)
1728 struct net *net = xp_net(pols[0]);
1729 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
1730 struct dst_entry *dst;
1731 struct xfrm_dst *xdst;
1734 /* Try to instantiate a bundle */
1735 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
1737 if (err != 0 && err != -EAGAIN)
1738 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1739 return ERR_PTR(err);
1742 dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
1744 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
1745 return ERR_CAST(dst);
1748 xdst = (struct xfrm_dst *)dst;
1749 xdst->num_xfrms = err;
1751 err = xfrm_dst_update_parent(dst, &pols[1]->selector);
1753 err = xfrm_dst_update_origin(dst, fl);
1754 if (unlikely(err)) {
1756 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1757 return ERR_PTR(err);
1760 xdst->num_pols = num_pols;
1761 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
1762 xdst->policy_genid = atomic_read(&pols[0]->genid);
1767 static void xfrm_policy_queue_process(unsigned long arg)
1770 struct sk_buff *skb;
1772 struct dst_entry *dst;
1773 struct xfrm_policy *pol = (struct xfrm_policy *)arg;
1774 struct xfrm_policy_queue *pq = &pol->polq;
1776 struct sk_buff_head list;
1778 spin_lock(&pq->hold_queue.lock);
1779 skb = skb_peek(&pq->hold_queue);
1781 spin_unlock(&pq->hold_queue.lock);
1786 xfrm_decode_session(skb, &fl, dst->ops->family);
1787 spin_unlock(&pq->hold_queue.lock);
1789 dst_hold(dst->path);
1790 dst = xfrm_lookup(xp_net(pol), dst->path, &fl,
1795 if (dst->flags & DST_XFRM_QUEUE) {
1798 if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
1801 pq->timeout = pq->timeout << 1;
1802 if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
1809 __skb_queue_head_init(&list);
1811 spin_lock(&pq->hold_queue.lock);
1813 skb_queue_splice_init(&pq->hold_queue, &list);
1814 spin_unlock(&pq->hold_queue.lock);
1816 while (!skb_queue_empty(&list)) {
1817 skb = __skb_dequeue(&list);
1819 xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
1820 dst_hold(skb_dst(skb)->path);
1821 dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
1830 skb_dst_set(skb, dst);
1832 err = dst_output(skb);
1841 xfrm_queue_purge(&pq->hold_queue);
1845 static int xdst_queue_output(struct sk_buff *skb)
1847 unsigned long sched_next;
1848 struct dst_entry *dst = skb_dst(skb);
1849 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1850 struct xfrm_policy *pol = xdst->pols[0];
1851 struct xfrm_policy_queue *pq = &pol->polq;
1852 const struct sk_buff *fclone = skb + 1;
1854 if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
1855 fclone->fclone == SKB_FCLONE_CLONE)) {
1860 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
1867 spin_lock_bh(&pq->hold_queue.lock);
1870 pq->timeout = XFRM_QUEUE_TMO_MIN;
1872 sched_next = jiffies + pq->timeout;
1874 if (del_timer(&pq->hold_timer)) {
1875 if (time_before(pq->hold_timer.expires, sched_next))
1876 sched_next = pq->hold_timer.expires;
1880 __skb_queue_tail(&pq->hold_queue, skb);
1881 if (!mod_timer(&pq->hold_timer, sched_next))
1884 spin_unlock_bh(&pq->hold_queue.lock);
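/* xdst_queue_output() never transmits anything itself: it parks the
 * skb on the policy's hold queue (or drops it beyond
 * XFRM_MAX_QUEUE_LEN) and arms the hold timer so that
 * xfrm_policy_queue_process() retries the lookup later.
 */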
1889 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
1890 struct dst_entry *dst,
1891 const struct flowi *fl,
1896 struct net_device *dev;
1897 struct dst_entry *dst1;
1898 struct xfrm_dst *xdst;
1900 xdst = xfrm_alloc_dst(net, family);
1904 if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0 ||
1905 (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP))
1908 dst1 = &xdst->u.dst;
1912 dst_copy_metrics(dst1, dst);
1914 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1915 dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
1916 dst1->lastuse = jiffies;
1918 dst1->input = dst_discard;
1919 dst1->output = xdst_queue_output;
1925 xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
1932 err = xfrm_fill_dst(xdst, dev, fl);
1941 xdst = ERR_PTR(err);
1945 static struct flow_cache_object *
1946 xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
1947 struct flow_cache_object *oldflo, void *ctx)
1949 struct dst_entry *dst_orig = (struct dst_entry *)ctx;
1950 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1951 struct xfrm_dst *xdst, *new_xdst;
1952 int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
1954 /* Check if the policies from the old bundle are usable */
1957 xdst = container_of(oldflo, struct xfrm_dst, flo);
1958 num_pols = xdst->num_pols;
1959 num_xfrms = xdst->num_xfrms;
1961 for (i = 0; i < num_pols; i++) {
1962 pols[i] = xdst->pols[i];
1963 pol_dead |= pols[i]->walk.dead;
1966 dst_free(&xdst->u.dst);
1974 /* Resolve policies to use if we couldn't get them from
1975  * the previous cache entry */
1978 pols[0] = __xfrm_policy_lookup(net, fl, family,
1979 flow_to_policy_dir(dir));
1980 err = xfrm_expand_policies(fl, family, pols,
1981 &num_pols, &num_xfrms);
1987 goto make_dummy_bundle;
1990 new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
1991 if (IS_ERR(new_xdst)) {
1992 err = PTR_ERR(new_xdst);
1996 goto make_dummy_bundle;
1997 dst_hold(&xdst->u.dst);
1999 } else if (new_xdst == NULL) {
2002 goto make_dummy_bundle;
2003 xdst->num_xfrms = 0;
2004 dst_hold(&xdst->u.dst);
2008 /* Kill the previous bundle */
2010 /* The policies were stolen for the newly generated bundle */
2012 dst_free(&xdst->u.dst);
2015 /* The flow cache does not hold a reference, it dst_free()'s,
2016  * but we do need to return one reference for the original caller */
2017 dst_hold(&new_xdst->u.dst);
2018 return &new_xdst->flo;
2021 /* We found policies, but there are no bundles to instantiate:
2022  * either because the policy blocks, has no transformations, or
2023  * we could not build a template (no xfrm_states). */
2024 xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family);
2026 xfrm_pols_put(pols, num_pols);
2027 return ERR_CAST(xdst);
2029 xdst->num_pols = num_pols;
2030 xdst->num_xfrms = num_xfrms;
2031 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
2033 dst_hold(&xdst->u.dst);
2037 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2040 dst_free(&xdst->u.dst);
2042 xfrm_pols_put(pols, num_pols);
2043 return ERR_PTR(err);
2046 static struct dst_entry *make_blackhole(struct net *net, u16 family,
2047 struct dst_entry *dst_orig)
2049 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2050 struct dst_entry *ret;
2053 dst_release(dst_orig);
2054 return ERR_PTR(-EINVAL);
2056 ret = afinfo->blackhole_route(net, dst_orig);
2058 xfrm_policy_put_afinfo(afinfo);
2063 /* Main function: finds/creates a bundle for a given flow.
2065 * At the moment we eat a raw IP route. Mostly to speed up lookups
2066 * on interfaces with disabled IPsec.
2068 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2069 const struct flowi *fl,
2070 struct sock *sk, int flags)
2072 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2073 struct flow_cache_object *flo;
2074 struct xfrm_dst *xdst;
2075 struct dst_entry *dst, *route;
2076 u16 family = dst_orig->ops->family;
2077 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
2078 int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
2085 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
2087 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
2088 err = xfrm_expand_policies(fl, family, pols,
2089 &num_pols, &num_xfrms);
2094 if (num_xfrms <= 0) {
2095 drop_pols = num_pols;
2099 xdst = xfrm_resolve_and_create_bundle(
2103 xfrm_pols_put(pols, num_pols);
2104 err = PTR_ERR(xdst);
2106 } else if (xdst == NULL) {
2108 drop_pols = num_pols;
2112 dst_hold(&xdst->u.dst);
2114 spin_lock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
2115 xdst->u.dst.next = xfrm_policy_sk_bundles;
2116 xfrm_policy_sk_bundles = &xdst->u.dst;
2117 spin_unlock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
2119 route = xdst->route;
2124 /* To accelerate a bit... */
2125 if ((dst_orig->flags & DST_NOXFRM) ||
2126 !net->xfrm.policy_count[XFRM_POLICY_OUT])
2129 flo = flow_cache_lookup(net, fl, family, dir,
2130 xfrm_bundle_lookup, dst_orig);
2137 xdst = container_of(flo, struct xfrm_dst, flo);
2139 num_pols = xdst->num_pols;
2140 num_xfrms = xdst->num_xfrms;
2141 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy*) * num_pols);
2142 route = xdst->route;
2146 if (route == NULL && num_xfrms > 0) {
2147 /* The only case when xfrm_bundle_lookup() returns a
2148  * bundle with a null route is when the template could
2149  * not be resolved. It means the policies are there, but
2150  * the bundle could not be created, since we don't yet
2151  * have the xfrm_states. We need to wait for the KM to
2152  * negotiate new SAs or bail out with an error. */
2153 if (net->xfrm.sysctl_larval_drop) {
2155 xfrm_pols_put(pols, drop_pols);
2156 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2158 return make_blackhole(net, family, dst_orig);
2160 if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) {
2161 DECLARE_WAITQUEUE(wait, current);
2163 add_wait_queue(&net->xfrm.km_waitq, &wait);
2164 set_current_state(TASK_INTERRUPTIBLE);
2166 set_current_state(TASK_RUNNING);
2167 remove_wait_queue(&net->xfrm.km_waitq, &wait);
2169 if (!signal_pending(current)) {
2178 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2186 if ((flags & XFRM_LOOKUP_ICMP) &&
2187 !(pols[0]->flags & XFRM_POLICY_ICMP)) {
2192 for (i = 0; i < num_pols; i++)
2193 pols[i]->curlft.use_time = get_seconds();
2195 if (num_xfrms < 0) {
2196 /* Prohibit the flow */
2197 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
2200 } else if (num_xfrms > 0) {
2201 /* Flow transformed */
2202 dst_release(dst_orig);
2204 /* Flow passes untransformed */
2209 xfrm_pols_put(pols, drop_pols);
2210 if (dst && dst->xfrm &&
2211 dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
2212 dst->flags |= DST_XFRM_TUNNEL;
2216 if (!(flags & XFRM_LOOKUP_ICMP)) {
2224 dst_release(dst_orig);
2225 xfrm_pols_put(pols, drop_pols);
2226 return ERR_PTR(err);
2228 EXPORT_SYMBOL(xfrm_lookup);
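/* A hedged sketch of a typical output-path call ("rt" being a route a
 * hypothetical caller obtained from an ordinary routing lookup):
 *
 *	dst = xfrm_lookup(net, &rt->dst, fl, sk, 0);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 *
 * On the error path dst_orig has already been released; on success,
 * dst is either the original route (flow passes untransformed) or the
 * head of a freshly built xfrm bundle.
 */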
2231 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
2233 struct xfrm_state *x;
2235 if (!skb->sp || idx < 0 || idx >= skb->sp->len)
2237 x = skb->sp->xvec[idx];
2238 if (!x->type->reject)
2240 return x->type->reject(x, skb, fl);
2243 /* When skb is transformed back to its "native" form, we have to
2244  * check policy restrictions. At the moment we do this in a maximally
2245  * stupid way. Shame on me. :-) Of course, connected sockets must
2246  * have the policy cached at them.
2250 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
2251 unsigned short family)
2253 if (xfrm_state_kern(x))
2254 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
2255 return x->id.proto == tmpl->id.proto &&
2256 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
2257 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
2258 x->props.mode == tmpl->mode &&
2259 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
2260 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
2261 !(x->props.mode != XFRM_MODE_TRANSPORT &&
2262 xfrm_state_addr_cmp(tmpl, x, family));
2266  * 0 or more is returned when validation succeeds (either a bypass
2267  * because of optional transport mode, or the next index of the matched
2268  * secpath state with the template).
2269 * -1 is returned when no matching template is found.
2270 * Otherwise "-2 - errored_index" is returned.
2273 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
2274 unsigned short family)
2278 if (tmpl->optional) {
2279 if (tmpl->mode == XFRM_MODE_TRANSPORT)
2283 for (; idx < sp->len; idx++) {
2284 if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
2286 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
2295 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
2296 unsigned int family, int reverse)
2298 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2301 if (unlikely(afinfo == NULL))
2302 return -EAFNOSUPPORT;
2304 afinfo->decode_session(skb, fl, reverse);
2305 err = security_xfrm_decode_session(skb, &fl->flowi_secid);
2306 xfrm_policy_put_afinfo(afinfo);
2309 EXPORT_SYMBOL(__xfrm_decode_session);
2311 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
2313 for (; k < sp->len; k++) {
2314 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
2323 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2324 unsigned short family)
2326 struct net *net = dev_net(skb->dev);
2327 struct xfrm_policy *pol;
2328 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2337 reverse = dir & ~XFRM_POLICY_MASK;
2338 dir &= XFRM_POLICY_MASK;
2339 fl_dir = policy_to_flow_dir(dir);
2341 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
2342 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2346 nf_nat_decode_session(skb, &fl, family);
2348 /* First, check the used SAs against their selectors. */
2352 for (i=skb->sp->len-1; i>=0; i--) {
2353 struct xfrm_state *x = skb->sp->xvec[i];
2354 if (!xfrm_selector_match(&x->sel, &fl, family)) {
2355 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
2362 if (sk && sk->sk_policy[dir]) {
2363 pol = xfrm_sk_policy_lookup(sk, dir, &fl);
2365 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2371 struct flow_cache_object *flo;
2373 flo = flow_cache_lookup(net, &fl, family, fl_dir,
2374 xfrm_policy_lookup, NULL);
2375 if (IS_ERR_OR_NULL(flo))
2376 pol = ERR_CAST(flo);
2378 pol = container_of(flo, struct xfrm_policy, flo);
2382 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2387 if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
2388 xfrm_secpath_reject(xerr_idx, skb, &fl);
2389 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
2395 pol->curlft.use_time = get_seconds();
2399 #ifdef CONFIG_XFRM_SUB_POLICY
2400 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2401 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
2405 if (IS_ERR(pols[1])) {
2406 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2409 pols[1]->curlft.use_time = get_seconds();
2415 if (pol->action == XFRM_POLICY_ALLOW) {
2416 struct sec_path *sp;
2417 static struct sec_path dummy;
2418 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
2419 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
2420 struct xfrm_tmpl **tpp = tp;
2424 if ((sp = skb->sp) == NULL)
2427 for (pi = 0; pi < npols; pi++) {
2428 if (pols[pi] != pol &&
2429 pols[pi]->action != XFRM_POLICY_ALLOW) {
2430 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2433 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
2434 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
2437 for (i = 0; i < pols[pi]->xfrm_nr; i++)
2438 tpp[ti++] = &pols[pi]->xfrm_vec[i];
2442 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
2446 /* For each tunnel xfrm, find the first matching tmpl.
2447  * For each tmpl before that, find the corresponding xfrm.
2448  * Order is _important_. Later we will implement
2449  * some barriers, but at the moment barriers
2450  * are implied between every two transformations.
2452 for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
2453 k = xfrm_policy_ok(tpp[i], sp, k, family);
2456 /* "-2 - errored_index" returned */
2458 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2463 if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2464 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2468 xfrm_pols_put(pols, npols);
2471 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2474 xfrm_secpath_reject(xerr_idx, skb, &fl);
2476 xfrm_pols_put(pols, npols);
2479 EXPORT_SYMBOL(__xfrm_policy_check);
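/* Callers normally reach this through the xfrm_policy_check() wrappers
 * in include/net/xfrm.h; a nonzero return means the packet passes
 * policy, zero means it must be dropped.
 */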
2481 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2483 struct net *net = dev_net(skb->dev);
2485 struct dst_entry *dst;
2488 if (xfrm_decode_session(skb, &fl, family) < 0) {
2489 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2495 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
2500 skb_dst_set(skb, dst);
2503 EXPORT_SYMBOL(__xfrm_route_forward);
2505 /* Optimize later using cookies and generation ids. */
2507 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2509 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
2510 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
2511 * get validated by dst_ops->check on every use. We do this
2512 * because when a normal route referenced by an XFRM dst is
2513 * obsoleted we do not go looking around for all parent
2514 * referencing XFRM dsts so that we can invalidate them. It
2515 * is just too much work. Instead we make the checks here on
2516 * every use. For example:
2518 * XFRM dst A --> IPv4 dst X
2520 * X is the "xdst->route" of A (X is also the "dst->path" of A
2521 * in this example). If X is marked obsolete, "A" will not
2522 * notice. That's what we are validating here via the
2523 * stale_bundle() check.
2525 * When a policy's bundle is pruned, we dst_free() the XFRM
2526  * dst which causes its ->obsolete field to be set to
2527 * DST_OBSOLETE_DEAD. If an XFRM dst has been pruned like
2528 * this, we want to force a new route lookup.
2530 if (dst->obsolete < 0 && !stale_bundle(dst))
2536 static int stale_bundle(struct dst_entry *dst)
2538 return !xfrm_bundle_ok((struct xfrm_dst *)dst);
2541 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2543 while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
2544 dst->dev = dev_net(dev)->loopback_dev;
2549 EXPORT_SYMBOL(xfrm_dst_ifdown);
2551 static void xfrm_link_failure(struct sk_buff *skb)
2553 /* Impossible. Such dst must be popped before it reaches the point of failure. */
2556 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2559 if (dst->obsolete) {
2567 static void __xfrm_garbage_collect(struct net *net)
2569 struct dst_entry *head, *next;
2571 spin_lock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
2572 head = xfrm_policy_sk_bundles;
2573 xfrm_policy_sk_bundles = NULL;
2574 spin_unlock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
2583 void xfrm_garbage_collect(struct net *net)
2586 __xfrm_garbage_collect(net);
2588 EXPORT_SYMBOL(xfrm_garbage_collect);
2590 static void xfrm_garbage_collect_deferred(struct net *net)
2592 flow_cache_flush_deferred();
2593 __xfrm_garbage_collect(net);
2596 static void xfrm_init_pmtu(struct dst_entry *dst)
2599 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2600 u32 pmtu, route_mtu_cached;
2602 pmtu = dst_mtu(dst->child);
2603 xdst->child_mtu_cached = pmtu;
2605 pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
2607 route_mtu_cached = dst_mtu(xdst->route);
2608 xdst->route_mtu_cached = route_mtu_cached;
2610 if (pmtu > route_mtu_cached)
2611 pmtu = route_mtu_cached;
2613 dst_metric_set(dst, RTAX_MTU, pmtu);
2614 } while ((dst = dst->next));
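/* The effective bundle MTU computed above is therefore
 * min(xfrm_state_mtu(dst->xfrm, child MTU), route MTU), refreshed
 * level by level down the chain.
 */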
2617 /* Check that the bundle accepts the flow and its components are still valid. */
2621 static int xfrm_bundle_ok(struct xfrm_dst *first)
2623 struct dst_entry *dst = &first->u.dst;
2624 struct xfrm_dst *last;
2627 if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
2628 (dst->dev && !netif_running(dst->dev)))
2631 if (dst->flags & DST_XFRM_QUEUE)
2637 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2639 if (dst->xfrm->km.state != XFRM_STATE_VALID)
2641 if (xdst->xfrm_genid != dst->xfrm->genid)
2643 if (xdst->num_pols > 0 &&
2644 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2647 mtu = dst_mtu(dst->child);
2648 if (xdst->child_mtu_cached != mtu) {
2650 xdst->child_mtu_cached = mtu;
2653 if (!dst_check(xdst->route, xdst->route_cookie))
2655 mtu = dst_mtu(xdst->route);
2656 if (xdst->route_mtu_cached != mtu) {
2658 xdst->route_mtu_cached = mtu;
2662 } while (dst->xfrm);
2667 mtu = last->child_mtu_cached;
2671 mtu = xfrm_state_mtu(dst->xfrm, mtu);
2672 if (mtu > last->route_mtu_cached)
2673 mtu = last->route_mtu_cached;
2674 dst_metric_set(dst, RTAX_MTU, mtu);
2679 last = (struct xfrm_dst *)last->u.dst.next;
2680 last->child_mtu_cached = mtu;
2686 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
2688 return dst_metric_advmss(dst->path);
2691 static unsigned int xfrm_mtu(const struct dst_entry *dst)
2693 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2695 return mtu ? : dst_mtu(dst->path);
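/* "mtu ? : dst_mtu(dst->path)" uses the GNU ?: extension: return the
 * raw RTAX_MTU metric when it is nonzero, otherwise fall back to the
 * path dst's MTU.
 */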
2698 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
2699 struct sk_buff *skb,
2702 return dst->path->ops->neigh_lookup(dst, skb, daddr);
2705 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2709 if (unlikely(afinfo == NULL))
2711 if (unlikely(afinfo->family >= NPROTO))
2712 return -EAFNOSUPPORT;
2713 spin_lock(&xfrm_policy_afinfo_lock);
2714 if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
2717 struct dst_ops *dst_ops = afinfo->dst_ops;
2718 if (likely(dst_ops->kmem_cachep == NULL))
2719 dst_ops->kmem_cachep = xfrm_dst_cache;
2720 if (likely(dst_ops->check == NULL))
2721 dst_ops->check = xfrm_dst_check;
2722 if (likely(dst_ops->default_advmss == NULL))
2723 dst_ops->default_advmss = xfrm_default_advmss;
2724 if (likely(dst_ops->mtu == NULL))
2725 dst_ops->mtu = xfrm_mtu;
2726 if (likely(dst_ops->negative_advice == NULL))
2727 dst_ops->negative_advice = xfrm_negative_advice;
2728 if (likely(dst_ops->link_failure == NULL))
2729 dst_ops->link_failure = xfrm_link_failure;
2730 if (likely(dst_ops->neigh_lookup == NULL))
2731 dst_ops->neigh_lookup = xfrm_neigh_lookup;
2732 if (likely(afinfo->garbage_collect == NULL))
2733 afinfo->garbage_collect = xfrm_garbage_collect_deferred;
2734 rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
2736 spin_unlock(&xfrm_policy_afinfo_lock);
2740 struct dst_ops *xfrm_dst_ops;
2742 switch (afinfo->family) {
2744 xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
2746 #if IS_ENABLED(CONFIG_IPV6)
2748 xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
2754 *xfrm_dst_ops = *afinfo->dst_ops;
2760 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
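/* Registration sketch, modelled on what the per-family code does
 * (field values beyond .family and .dst_ops are assumptions here):
 *
 *	static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
 *		.family  = AF_INET,
 *		.dst_ops = &xfrm4_dst_ops,
 *		...
 *	};
 *
 *	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
 */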
int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	spin_lock(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family],
					 NULL);
	}
	spin_unlock(&xfrm_policy_afinfo_lock);
	if (!err) {
		struct dst_ops *dst_ops = afinfo->dst_ops;

		synchronize_rcu();

		dst_ops->kmem_cachep = NULL;
		dst_ops->check = NULL;
		dst_ops->negative_advice = NULL;
		dst_ops->link_failure = NULL;
		afinfo->garbage_collect = NULL;
	}
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
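/* Copy the currently registered dst_ops templates into a newly created
 * namespace.
 */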
static void __net_init xfrm_dst_ops_init(struct net *net)
{
	struct xfrm_policy_afinfo *afinfo;

	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
	if (afinfo)
		net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
#if IS_ENABLED(CONFIG_IPV6)
	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
	if (afinfo)
		net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
#endif
	rcu_read_unlock();
}
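/* On NETDEV_DOWN, garbage-collect cached bundles in the device's netns
 * so none of them keeps a reference to the vanishing device.
 */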
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_DOWN:
		xfrm_garbage_collect(dev_net(dev));
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};
#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;

	if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
			  sizeof(struct linux_xfrm_mib),
			  __alignof__(struct linux_xfrm_mib)) < 0)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
	return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif
static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir;

	if (net_eq(net, &init_net))
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
	}

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	if (net_eq(net, &init_net))
		register_netdevice_notifier(&xfrm_dev_notifier);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}
static void xfrm_policy_fini(struct net *net)
{
	struct xfrm_audit audit_info;
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	audit_info.loginuid = INVALID_UID;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
#endif
	audit_info.loginuid = INVALID_UID;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
}
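/* Per-netns constructor: bring up statistics, state, policy and sysctl
 * in order, unwinding already-initialized pieces on failure.
 */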
static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	xfrm_dst_ops_init(net);
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;

	/* Initialize the per-net locks here */
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	rwlock_init(&net->xfrm.xfrm_policy_lock);
	spin_lock_init(&net->xfrm.xfrm_policy_sk_bundle_lock);
	mutex_init(&net->xfrm.xfrm_cfg_mutex);

	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}
static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
	xfrm_input_init();
}
#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}
void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
			   kuid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      kuid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif /* CONFIG_AUDITSYSCALL */
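/* MIGRATE support: switch the endpoint addresses of matching policies
 * and their states, as used e.g. by Mobile IPv6 (PF_KEY MIGRATE).
 */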
#ifdef CONFIG_XFRM_MIGRATE
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
				    sel_cmp->family) &&
		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
				    sel_cmp->family) &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return true;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return true;
		}
	}
	return false;
}
static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type, struct net *net)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME*/
	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&net->xfrm.xfrm_policy_lock);

	return ret;
}
static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
					    m->old_family) &&
			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
					    m->old_family)) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			 * any IP addresses, hence we just compare mode and
			 * protocol
			 */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}
/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}
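/* Validate a MIGRATE request: between 1 and XFRM_MAX_DEPTH entries, no
 * no-op moves, no unspecified (any) new addresses, and no duplicates.
 */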
static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if (xfrm_addr_equal(&m[i].old_daddr, &m[i].new_daddr,
				    m[i].old_family) &&
		    xfrm_addr_equal(&m[i].old_saddr, &m[i].new_saddr,
				    m[i].old_family))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k, struct net *net)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp, net))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif /* CONFIG_XFRM_MIGRATE */