6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <asm/uaccess.h>
/* Netlink socket handle for xfrm event notification; exported for
 * use by the key-manager / configuration modules. */
24 EXPORT_SYMBOL(xfrm_nl);
/* Sysctl-tunable defaults for asynchronous replay-event (aevent)
 * notification; initial values come from the XFRM_AE_* constants. */
26 u32 sysctl_xfrm_aevent_etime = XFRM_AE_ETIME;
27 u32 sysctl_xfrm_aevent_rseqth = XFRM_AE_SEQT_SIZE;
28 /* Each xfrm_state may be linked to two tables:
30 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
31 2. Hash table by daddr to find what SAs exist for given
32 destination/tunnel endpoint. (output)
/* Protects both hash tables below and all per-chain list operations. */
35 static DEFINE_SPINLOCK(xfrm_state_lock);
37 /* Hash table to find appropriate SA towards given target (endpoint
38 * of tunnel or destination of transport mode) allowed by selector.
40 * Main use is finding SA after policy selected tunnel or transport mode.
41 * Also, it can be used by ah/esp icmp error handler to find offending SA.
43 static struct list_head xfrm_state_bydst[XFRM_DST_HSIZE];
44 static struct list_head xfrm_state_byspi[XFRM_DST_HSIZE];
/* Key managers sleep on this queue; woken when state changes occur. */
46 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
47 EXPORT_SYMBOL(km_waitq);
/* Per-family (AF_INET/AF_INET6) operation tables, guarded by a rwlock. */
49 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
50 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
/* Deferred destruction: dead states are queued on xfrm_state_gc_list
 * (under xfrm_state_gc_lock) and freed later by xfrm_state_gc_work in
 * process context. */
52 static struct work_struct xfrm_state_gc_work;
53 static struct list_head xfrm_state_gc_list = LIST_HEAD_INIT(xfrm_state_gc_list);
54 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
/* Set when the GC task should also flush cached dst bundles. */
56 static int xfrm_state_gc_flush_bundles;
58 int __xfrm_state_delete(struct xfrm_state *x);
60 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
61 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
63 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
64 void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
/* Final teardown of a dead xfrm_state, run from the GC work queue:
 * cancel any pending lifetime/replay timers, let the transform type
 * destroy its private data and drop the type reference, then release
 * the security context. */
66 static void xfrm_state_gc_destroy(struct xfrm_state *x)
68 if (del_timer(&x->timer))
70 if (del_timer(&x->rtimer))
77 x->type->destructor(x);
78 xfrm_put_type(x->type);
80 security_xfrm_state_free(x);
/* Workqueue handler for deferred state destruction.  If a bundle flush
 * was requested, clear the flag first.  Then splice the pending GC list
 * onto a private list under xfrm_state_gc_lock and destroy each queued
 * state outside the lock.  Queued states are linked via their bydst
 * list head (reused once the state is off the hash tables). */
84 static void xfrm_state_gc_task(void *data)
87 struct list_head *entry, *tmp;
88 struct list_head gc_list = LIST_HEAD_INIT(gc_list);
90 if (xfrm_state_gc_flush_bundles) {
91 xfrm_state_gc_flush_bundles = 0;
95 spin_lock_bh(&xfrm_state_gc_lock);
96 list_splice_init(&xfrm_state_gc_list, &gc_list);
97 spin_unlock_bh(&xfrm_state_gc_lock);
99 list_for_each_safe(entry, tmp, &gc_list) {
100 x = list_entry(entry, struct xfrm_state, bydst);
101 xfrm_state_gc_destroy(x);
/* Convert a timeout in seconds to jiffies, clamped so the result never
 * exceeds MAX_SCHEDULE_TIMEOUT-1 (avoids overflow in secs*HZ). */
106 static inline unsigned long make_jiffies(long secs)
108 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
109 return MAX_SCHEDULE_TIMEOUT-1;
/* Per-state lifetime timer, armed by state insert/alloc paths.
 * Using wall-clock seconds (xtime.tv_sec), computes the remaining time
 * to each configured hard/soft add/use expiry and tracks the nearest
 * future event in 'next'.  Soft expiry sends a non-hard notification
 * via km_state_expired(x, 0, 0); hard expiry deletes the state (and
 * notifies with hard=1 when it carries a real SPI).  The timer is
 * re-armed for the next pending event via mod_timer.  Runs with
 * x->lock held (released at the end). */
114 static void xfrm_timer_handler(unsigned long data)
116 struct xfrm_state *x = (struct xfrm_state*)data;
117 unsigned long now = (unsigned long)xtime.tv_sec;
118 long next = LONG_MAX;
122 if (x->km.state == XFRM_STATE_DEAD)
124 if (x->km.state == XFRM_STATE_EXPIRED)
126 if (x->lft.hard_add_expires_seconds) {
127 long tmo = x->lft.hard_add_expires_seconds +
128 x->curlft.add_time - now;
134 if (x->lft.hard_use_expires_seconds) {
135 long tmo = x->lft.hard_use_expires_seconds +
/* use_time of 0 means "not used yet"; treat as now so the
 * timeout counts from first use. */
136 (x->curlft.use_time ? : now) - now;
144 if (x->lft.soft_add_expires_seconds) {
145 long tmo = x->lft.soft_add_expires_seconds +
146 x->curlft.add_time - now;
152 if (x->lft.soft_use_expires_seconds) {
153 long tmo = x->lft.soft_use_expires_seconds +
154 (x->curlft.use_time ? : now) - now;
/* Soft limit reached: warn the key manager (hard=0). */
163 km_state_expired(x, 0, 0);
165 if (next != LONG_MAX &&
166 !mod_timer(&x->timer, jiffies + make_jiffies(next)))
/* An ACQ state that never got an SPI simply expires. */
171 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
172 x->km.state = XFRM_STATE_EXPIRED;
177 if (!__xfrm_state_delete(x) && x->id.spi)
178 km_state_expired(x, 1, 0);
181 spin_unlock(&x->lock);
185 static void xfrm_replay_timer_handler(unsigned long data);
/* Allocate and zero-initialize a new xfrm_state with one reference.
 * Sets up the bydst/byspi list heads, the lifetime and replay timers
 * (both pointing back at the state via their data word), records the
 * creation time, and sets all soft/hard byte/packet limits to XFRM_INF
 * (i.e. unlimited by default).  GFP_ATOMIC: callers may hold locks.
 * Returns NULL on allocation failure (check elided in this view). */
187 struct xfrm_state *xfrm_state_alloc(void)
189 struct xfrm_state *x;
191 x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
194 memset(x, 0, sizeof(struct xfrm_state));
195 atomic_set(&x->refcnt, 1);
196 atomic_set(&x->tunnel_users, 0);
197 INIT_LIST_HEAD(&x->bydst);
198 INIT_LIST_HEAD(&x->byspi);
199 init_timer(&x->timer);
200 x->timer.function = xfrm_timer_handler;
201 x->timer.data = (unsigned long)x;
202 init_timer(&x->rtimer);
203 x->rtimer.function = xfrm_replay_timer_handler;
204 x->rtimer.data = (unsigned long)x;
205 x->curlft.add_time = (unsigned long)xtime.tv_sec;
206 x->lft.soft_byte_limit = XFRM_INF;
207 x->lft.soft_packet_limit = XFRM_INF;
208 x->lft.hard_byte_limit = XFRM_INF;
209 x->lft.hard_packet_limit = XFRM_INF;
210 x->replay_maxage = 0;
211 x->replay_maxdiff = 0;
212 spin_lock_init(&x->lock);
216 EXPORT_SYMBOL(xfrm_state_alloc);
/* Queue a dead state for deferred destruction: add it to the GC list
 * under the GC lock and kick the work queue.  The state must already
 * be DEAD (BUG_TRAP asserts this); actual freeing happens later in
 * xfrm_state_gc_task, in process context. */
218 void __xfrm_state_destroy(struct xfrm_state *x)
220 BUG_TRAP(x->km.state == XFRM_STATE_DEAD);
222 spin_lock_bh(&xfrm_state_gc_lock);
223 list_add(&x->bydst, &xfrm_state_gc_list);
224 spin_unlock_bh(&xfrm_state_gc_lock);
225 schedule_work(&xfrm_state_gc_work);
227 EXPORT_SYMBOL(__xfrm_state_destroy);
/* Delete a state: mark it DEAD, unhash it under xfrm_state_lock,
 * cancel its timers, and drop the reference given by xfrm_state_alloc.
 * Caller must hold x->lock (see xfrm_state_delete).  If extra
 * references remain (DST bundles), request a bundle flush via the GC
 * task so those cached routes get torn down. */
229 int __xfrm_state_delete(struct xfrm_state *x)
233 if (x->km.state != XFRM_STATE_DEAD) {
234 x->km.state = XFRM_STATE_DEAD;
235 spin_lock(&xfrm_state_lock);
242 spin_unlock(&xfrm_state_lock);
243 if (del_timer(&x->timer))
245 if (del_timer(&x->rtimer))
248 /* The number two in this test is the reference
249 * mentioned in the comment below plus the reference
250 * our caller holds. A larger value means that
251 * there are DSTs attached to this xfrm_state.
253 if (atomic_read(&x->refcnt) > 2) {
254 xfrm_state_gc_flush_bundles = 1;
255 schedule_work(&xfrm_state_gc_work);
258 /* All xfrm_state objects are created by xfrm_state_alloc.
259 * The xfrm_state_alloc call gives a reference, and that
260 * is what we are dropping here.
268 EXPORT_SYMBOL(__xfrm_state_delete);
/* Locked wrapper around __xfrm_state_delete: takes x->lock (BH-safe)
 * for the duration of the delete and returns its result. */
270 int xfrm_state_delete(struct xfrm_state *x)
274 spin_lock_bh(&x->lock);
275 err = __xfrm_state_delete(x);
276 spin_unlock_bh(&x->lock);
280 EXPORT_SYMBOL(xfrm_state_delete);
/* Delete every non-kernel-owned state whose protocol matches 'proto'
 * (IPSEC_PROTO_ANY matches all).  Walks each bydst hash chain under
 * xfrm_state_lock, but drops the lock around each xfrm_state_delete
 * call (which takes x->lock itself) and then reacquires it —
 * presumably restarting the chain scan after each delete; the restart
 * logic is elided in this view. */
282 void xfrm_state_flush(u8 proto)
285 struct xfrm_state *x;
287 spin_lock_bh(&xfrm_state_lock);
288 for (i = 0; i < XFRM_DST_HSIZE; i++) {
290 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
291 if (!xfrm_state_kern(x) &&
292 (proto == IPSEC_PROTO_ANY || x->id.proto == proto)) {
294 spin_unlock_bh(&xfrm_state_lock);
296 xfrm_state_delete(x);
299 spin_lock_bh(&xfrm_state_lock);
304 spin_unlock_bh(&xfrm_state_lock);
307 EXPORT_SYMBOL(xfrm_state_flush);
/* Fill in a temporary selector on state x from the flow, template and
 * addresses, delegating to the address-family-specific init_tempsel
 * hook.  Looks up and releases the afinfo for 'family'. */
310 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
311 struct xfrm_tmpl *tmpl,
312 xfrm_address_t *daddr, xfrm_address_t *saddr,
313 unsigned short family)
315 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
318 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
319 xfrm_state_put_afinfo(afinfo);
/* Resolve the SA to use for a flow, given the policy template.
 * Scans the bydst hash chain for states matching family, reqid,
 * addresses, mode, proto and (optionally) SPI, picking the "best"
 * VALID candidate (prefer non-dying, then most recently added) whose
 * selector and security context match.  If nothing usable is found,
 * no error occurred and no acquire is already pending, allocates a
 * larval ACQ state, initializes its temporary selector, queries the
 * key managers via km_query, hashes it in, and arms its expiry timer
 * with XFRM_ACQ_EXPIRES.  On failure paths *err is set; -EAGAIN when
 * an acquire is in progress.  Runs under xfrm_state_lock. */
324 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
325 struct flowi *fl, struct xfrm_tmpl *tmpl,
326 struct xfrm_policy *pol, int *err,
327 unsigned short family)
329 unsigned h = xfrm_dst_hash(daddr, family);
330 struct xfrm_state *x, *x0;
331 int acquire_in_progress = 0;
333 struct xfrm_state *best = NULL;
334 struct xfrm_state_afinfo *afinfo;
336 afinfo = xfrm_state_get_afinfo(family);
337 if (afinfo == NULL) {
338 *err = -EAFNOSUPPORT;
342 spin_lock_bh(&xfrm_state_lock);
343 list_for_each_entry(x, xfrm_state_bydst+h, bydst) {
344 if (x->props.family == family &&
345 x->props.reqid == tmpl->reqid &&
346 xfrm_state_addr_check(x, daddr, saddr, family) &&
347 tmpl->mode == x->props.mode &&
348 tmpl->id.proto == x->id.proto &&
349 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
351 1. There is a valid state with matching selector.
353 2. Valid state with inappropriate selector. Skip.
355 Entering area of "sysdeps".
357 3. If state is not valid, selector is temporary,
358 it selects only session which triggered
359 previous resolution. Key manager will do
360 something to install a state with proper
363 if (x->km.state == XFRM_STATE_VALID) {
364 if (!xfrm_selector_match(&x->sel, fl, family) ||
365 !xfrm_sec_ctx_match(pol->security, x->security))
/* Prefer the candidate that is not dying; tie-break
 * on the newer add_time. */
368 best->km.dying > x->km.dying ||
369 (best->km.dying == x->km.dying &&
370 best->curlft.add_time < x->curlft.add_time))
372 } else if (x->km.state == XFRM_STATE_ACQ) {
373 acquire_in_progress = 1;
374 } else if (x->km.state == XFRM_STATE_ERROR ||
375 x->km.state == XFRM_STATE_EXPIRED) {
376 if (xfrm_selector_match(&x->sel, fl, family) &&
377 xfrm_sec_ctx_match(pol->security, x->security))
/* No usable state: consider creating a larval ACQ entry. */
384 if (!x && !error && !acquire_in_progress) {
386 (x0 = afinfo->state_lookup(daddr, tmpl->id.spi,
387 tmpl->id.proto)) != NULL) {
392 x = xfrm_state_alloc();
397 /* Initialize temporary selector matching only
398 * to current session. */
399 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
401 if (km_query(x, tmpl, pol) == 0) {
402 x->km.state = XFRM_STATE_ACQ;
403 list_add_tail(&x->bydst, xfrm_state_bydst+h);
406 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
407 list_add(&x->byspi, xfrm_state_byspi+h);
410 x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
412 x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
413 add_timer(&x->timer);
/* km_query failed: mark the larval state dead. */
415 x->km.state = XFRM_STATE_DEAD;
425 *err = acquire_in_progress ? -EAGAIN : error;
426 spin_unlock_bh(&xfrm_state_lock);
427 xfrm_state_put_afinfo(afinfo);
/* Hash a state into both tables (bydst by destination address, byspi
 * by (daddr, spi, proto)) and arm its timers: the lifetime timer one
 * second out, and the replay timer if replay_maxage is configured.
 * Caller must hold xfrm_state_lock. */
431 static void __xfrm_state_insert(struct xfrm_state *x)
433 unsigned h = xfrm_dst_hash(&x->id.daddr, x->props.family);
435 list_add(&x->bydst, xfrm_state_bydst+h);
438 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
440 list_add(&x->byspi, xfrm_state_byspi+h);
443 if (!mod_timer(&x->timer, jiffies + HZ))
446 if (x->replay_maxage &&
447 !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
/* Public insert: takes xfrm_state_lock around __xfrm_state_insert,
 * then flushes all cached bundles so routing re-resolves against the
 * new state. */
453 void xfrm_state_insert(struct xfrm_state *x)
455 spin_lock_bh(&xfrm_state_lock);
456 __xfrm_state_insert(x);
457 spin_unlock_bh(&xfrm_state_lock);
459 xfrm_flush_all_bundles();
461 EXPORT_SYMBOL(xfrm_state_insert);
463 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
/* Add a fully-specified state.  Under xfrm_state_lock: fail if an SA
 * with the same (daddr, spi, proto) already exists; otherwise look for
 * a matching larval ACQ state — first by km.seq (rejecting it if its
 * daddr differs), then via the per-family find_acq hook — insert the
 * new state, and finally (outside the lock) flush cached bundles and
 * delete the superseded ACQ state if one was found. */
465 int xfrm_state_add(struct xfrm_state *x)
467 struct xfrm_state_afinfo *afinfo;
468 struct xfrm_state *x1;
472 family = x->props.family;
473 afinfo = xfrm_state_get_afinfo(family);
474 if (unlikely(afinfo == NULL))
475 return -EAFNOSUPPORT;
477 spin_lock_bh(&xfrm_state_lock);
479 x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
488 x1 = __xfrm_find_acq_byseq(x->km.seq);
489 if (x1 && xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family)) {
496 x1 = afinfo->find_acq(
497 x->props.mode, x->props.reqid, x->id.proto,
498 &x->id.daddr, &x->props.saddr, 0);
500 __xfrm_state_insert(x);
504 spin_unlock_bh(&xfrm_state_lock);
505 xfrm_state_put_afinfo(afinfo);
508 xfrm_flush_all_bundles();
/* Retire the larval ACQ state the new SA replaces. */
511 xfrm_state_delete(x1);
517 EXPORT_SYMBOL(xfrm_state_add);
/* Update an existing SA identified by (daddr, spi, proto).  Kernel-
 * owned states cannot be updated.  If the existing entry is a larval
 * ACQ state, insert the new one and delete the old.  Otherwise, under
 * x1->lock, copy the encapsulation parameters and lifetime settings
 * from x into the live state x1, re-arm its timer, and re-check expiry
 * if it has already been used. */
519 int xfrm_state_update(struct xfrm_state *x)
521 struct xfrm_state_afinfo *afinfo;
522 struct xfrm_state *x1;
525 afinfo = xfrm_state_get_afinfo(x->props.family);
526 if (unlikely(afinfo == NULL))
527 return -EAFNOSUPPORT;
529 spin_lock_bh(&xfrm_state_lock);
530 x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
536 if (xfrm_state_kern(x1)) {
542 if (x1->km.state == XFRM_STATE_ACQ) {
543 __xfrm_state_insert(x);
549 spin_unlock_bh(&xfrm_state_lock);
550 xfrm_state_put_afinfo(afinfo);
556 xfrm_state_delete(x1);
562 spin_lock_bh(&x1->lock);
563 if (likely(x1->km.state == XFRM_STATE_VALID)) {
564 if (x->encap && x1->encap)
565 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
566 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
569 if (!mod_timer(&x1->timer, jiffies + HZ))
571 if (x1->curlft.use_time)
572 xfrm_state_check_expire(x1);
576 spin_unlock_bh(&x1->lock);
582 EXPORT_SYMBOL(xfrm_state_update);
/* Check byte/packet lifetime limits on use of a state.  Stamps
 * use_time on first use.  Hard limit reached: mark EXPIRED and fire
 * the timer immediately.  Soft limit reached: send a soft expiry
 * notification (km_state_expired with hard=0).  Non-VALID states are
 * rejected early. */
584 int xfrm_state_check_expire(struct xfrm_state *x)
586 if (!x->curlft.use_time)
587 x->curlft.use_time = (unsigned long)xtime.tv_sec;
589 if (x->km.state != XFRM_STATE_VALID)
592 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
593 x->curlft.packets >= x->lft.hard_packet_limit) {
594 x->km.state = XFRM_STATE_EXPIRED;
595 if (!mod_timer(&x->timer, jiffies))
601 (x->curlft.bytes >= x->lft.soft_byte_limit ||
602 x->curlft.packets >= x->lft.soft_packet_limit)) {
604 km_state_expired(x, 0, 0);
608 EXPORT_SYMBOL(xfrm_state_check_expire);
/* Ensure the skb has enough headroom for this transform's header plus
 * the output device's link-layer reserve; expand the head if not. */
610 static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
612 int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
616 return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
618 /* Check tail too... */
/* Output-path check: verify the state's lifetime limits, then make
 * sure the skb has room for the transform header. */
622 int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
624 int err = xfrm_state_check_expire(x);
627 err = xfrm_state_check_space(x, skb);
631 EXPORT_SYMBOL(xfrm_state_check);
/* Look up an SA by (daddr, spi, proto) via the per-family hook, under
 * xfrm_state_lock.  Returns NULL-or-state per the hook's contract. */
634 xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto,
635 unsigned short family)
637 struct xfrm_state *x;
638 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
642 spin_lock_bh(&xfrm_state_lock);
643 x = afinfo->state_lookup(daddr, spi, proto);
644 spin_unlock_bh(&xfrm_state_lock);
645 xfrm_state_put_afinfo(afinfo);
648 EXPORT_SYMBOL(xfrm_state_lookup);
/* Find (or, when 'create' is set, create) a larval ACQ state matching
 * the given mode/reqid/proto/addresses, via the per-family find_acq
 * hook, under xfrm_state_lock. */
651 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
652 xfrm_address_t *daddr, xfrm_address_t *saddr,
653 int create, unsigned short family)
655 struct xfrm_state *x;
656 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
660 spin_lock_bh(&xfrm_state_lock);
661 x = afinfo->find_acq(mode, reqid, proto, daddr, saddr, create);
662 spin_unlock_bh(&xfrm_state_lock);
663 xfrm_state_put_afinfo(afinfo);
666 EXPORT_SYMBOL(xfrm_find_acq);
668 /* Silly enough, but I'm lazy to build resolution list */
/* Linear scan of every bydst chain for an ACQ state whose km.seq
 * matches 'seq'.  Caller must hold xfrm_state_lock. */
670 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
673 struct xfrm_state *x;
675 for (i = 0; i < XFRM_DST_HSIZE; i++) {
676 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
677 if (x->km.seq == seq && x->km.state == XFRM_STATE_ACQ) {
/* Locked wrapper around __xfrm_find_acq_byseq. */
686 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
688 struct xfrm_state *x;
690 spin_lock_bh(&xfrm_state_lock);
691 x = __xfrm_find_acq_byseq(seq);
692 spin_unlock_bh(&xfrm_state_lock);
695 EXPORT_SYMBOL(xfrm_find_acq_byseq);
/* Return the next acquire sequence number.  The double-increment
 * idiom (++acqseq ? : ++acqseq) skips 0 on wraparound, so the result
 * is always nonzero.  Serialized by a local spinlock. */
697 u32 xfrm_get_acqseq(void)
701 static DEFINE_SPINLOCK(acqseq_lock);
703 spin_lock_bh(&acqseq_lock);
704 res = (++acqseq ? : ++acqseq);
705 spin_unlock_bh(&acqseq_lock);
708 EXPORT_SYMBOL(xfrm_get_acqseq);
/* Allocate an SPI for state x in [minspi, maxspi].  If the range is a
 * single value, just check it is unused.  Otherwise try random values
 * in host byte order (range bounds converted via ntohl) until a free
 * one is found, storing it back in network order.  On success the
 * state is hashed into the byspi table under xfrm_state_lock.  Note:
 * each probe uses xfrm_state_lookup, so a returned x0 presumably needs
 * its reference dropped — that path is elided in this view. */
711 xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
714 struct xfrm_state *x0;
719 if (minspi == maxspi) {
720 x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
728 minspi = ntohl(minspi);
729 maxspi = ntohl(maxspi);
730 for (h=0; h<maxspi-minspi+1; h++) {
731 spi = minspi + net_random()%(maxspi-minspi+1);
732 x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
734 x->id.spi = htonl(spi);
741 spin_lock_bh(&xfrm_state_lock);
742 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
743 list_add(&x->byspi, xfrm_state_byspi+h);
745 spin_unlock_bh(&xfrm_state_lock);
749 EXPORT_SYMBOL(xfrm_alloc_spi);
/* Iterate all states matching 'proto' (IPSEC_PROTO_ANY matches all)
 * and invoke 'func' on each.  Two passes over the bydst table under
 * xfrm_state_lock: the first counts matching entries, the second calls
 * func(x, --count, data) — so the callback receives a descending index
 * that reaches 0 on the last entry. */
751 int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
755 struct xfrm_state *x;
759 spin_lock_bh(&xfrm_state_lock);
760 for (i = 0; i < XFRM_DST_HSIZE; i++) {
761 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
762 if (proto == IPSEC_PROTO_ANY || x->id.proto == proto)
771 for (i = 0; i < XFRM_DST_HSIZE; i++) {
772 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
773 if (proto != IPSEC_PROTO_ANY && x->id.proto != proto)
775 err = func(x, --count, data);
781 spin_unlock_bh(&xfrm_state_lock);
784 EXPORT_SYMBOL(xfrm_state_walk);
/* Emit a replay-state (aevent) notification to key managers when the
 * sequence numbers have moved far enough (UPDATE) or the replay timer
 * fired and something changed (TIMEOUT).  On send, snapshot the
 * current replay state into preplay and re-arm the replay timer. */
787 void xfrm_replay_notify(struct xfrm_state *x, int event)
790 /* we send notify messages in case
791 * 1. we updated on of the sequence numbers, and the seqno difference
792 * is at least x->replay_maxdiff, in this case we also update the
793 * timeout of our timer function
794 * 2. if x->replay_maxage has elapsed since last update,
795 * and there were changes
797 * The state structure must be locked!
801 case XFRM_REPLAY_UPDATE:
/* Suppress the event while both seq deltas are below maxdiff. */
802 if (x->replay_maxdiff &&
803 (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
804 (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff))
809 case XFRM_REPLAY_TIMEOUT:
/* Nothing changed since the last snapshot: stay quiet. */
810 if ((x->replay.seq == x->preplay.seq) &&
811 (x->replay.bitmap == x->preplay.bitmap) &&
812 (x->replay.oseq == x->preplay.oseq))
818 memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
819 c.event = XFRM_MSG_NEWAE;
820 c.data.aevent = event;
821 km_state_notify(x, &c);
823 if (x->replay_maxage &&
824 !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
/* Replay timer callback: if aevents are enabled and the state is
 * still VALID, emit a TIMEOUT-style replay notification.  Runs with
 * x->lock held (released at the end). */
828 static void xfrm_replay_timer_handler(unsigned long data)
830 struct xfrm_state *x = (struct xfrm_state*)data;
834 if (xfrm_aevent_is_on() && x->km.state == XFRM_STATE_VALID)
835 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
837 spin_unlock(&x->lock);
/* Anti-replay check on an inbound sequence number.  seq 0 is always
 * invalid; a seq beyond the current window head is accepted; a seq
 * older than the replay window, or one already marked in the bitmap,
 * is a replay (replay_window stat is bumped for the former). */
840 int xfrm_replay_check(struct xfrm_state *x, u32 seq)
846 if (unlikely(seq == 0))
849 if (likely(seq > x->replay.seq))
852 diff = x->replay.seq - seq;
853 if (diff >= x->props.replay_window) {
854 x->stats.replay_window++;
858 if (x->replay.bitmap & (1U << diff)) {
864 EXPORT_SYMBOL(xfrm_replay_check);
/* Slide the anti-replay window forward for an accepted sequence
 * number: a newer seq shifts the bitmap (or resets it when the jump
 * exceeds the window) and sets bit 0; an older-but-valid seq just
 * sets its bit.  Notifies key managers if aevents are enabled. */
866 void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
872 if (seq > x->replay.seq) {
873 diff = seq - x->replay.seq;
874 if (diff < x->props.replay_window)
875 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
877 x->replay.bitmap = 1;
880 diff = x->replay.seq - seq;
881 x->replay.bitmap |= (1U << diff);
884 if (xfrm_aevent_is_on())
885 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
887 EXPORT_SYMBOL(xfrm_replay_advance);
/* Registered key managers (PF_KEY, netlink, ...), guarded by a
 * rwlock: readers broadcast events, writers register/unregister. */
889 static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
890 static DEFINE_RWLOCK(xfrm_km_lock);
/* Broadcast a policy event to every registered key manager that
 * implements notify_policy. */
892 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
896 read_lock(&xfrm_km_lock);
897 list_for_each_entry(km, &xfrm_km_list, list)
898 if (km->notify_policy)
899 km->notify_policy(xp, dir, c);
900 read_unlock(&xfrm_km_lock);
/* Broadcast a state event to every registered key manager. */
903 void km_state_notify(struct xfrm_state *x, struct km_event *c)
906 read_lock(&xfrm_km_lock);
907 list_for_each_entry(km, &xfrm_km_list, list)
910 read_unlock(&xfrm_km_lock);
913 EXPORT_SYMBOL(km_policy_notify);
914 EXPORT_SYMBOL(km_state_notify);
/* Notify key managers that a state's lifetime expired.  'hard'
 * distinguishes soft warnings from hard expiry; 'pid' identifies the
 * requesting listener.  Wraps km_state_notify with XFRM_MSG_EXPIRE. */
916 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
922 c.event = XFRM_MSG_EXPIRE;
923 km_state_notify(x, &c);
929 EXPORT_SYMBOL(km_state_expired);
931 * We send to all registered managers regardless of failure
932 * We are happy with one success
/* Ask every registered key manager to acquire (negotiate) an SA for
 * template t under policy pol; starts from -EINVAL and succeeds if
 * any manager's acquire callback does. */
934 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
936 int err = -EINVAL, acqret;
939 read_lock(&xfrm_km_lock);
940 list_for_each_entry(km, &xfrm_km_list, list) {
941 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
945 read_unlock(&xfrm_km_lock);
948 EXPORT_SYMBOL(km_query);
/* Report a new NAT-T address/port mapping for state x to every key
 * manager that implements new_mapping. */
950 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
955 read_lock(&xfrm_km_lock);
956 list_for_each_entry(km, &xfrm_km_list, list) {
958 err = km->new_mapping(x, ipaddr, sport);
962 read_unlock(&xfrm_km_lock);
965 EXPORT_SYMBOL(km_new_mapping);
/* Notify key managers that a policy expired (XFRM_MSG_POLEXPIRE);
 * mirrors km_state_expired for the policy side. */
967 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
973 c.event = XFRM_MSG_POLEXPIRE;
974 km_policy_notify(pol, dir, &c);
/* setsockopt() entry for per-socket IPsec policy.  Bounds-checks the
 * option length (must fit in one page), copies the policy blob from
 * userspace, then asks each registered key manager to compile it into
 * a struct xfrm_policy; the first successful compile is installed on
 * the socket via xfrm_sk_policy_insert. */
980 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
985 struct xfrm_policy *pol = NULL;
987 if (optlen <= 0 || optlen > PAGE_SIZE)
990 data = kmalloc(optlen, GFP_KERNEL);
995 if (copy_from_user(data, optval, optlen))
999 read_lock(&xfrm_km_lock);
1000 list_for_each_entry(km, &xfrm_km_list, list) {
1001 pol = km->compile_policy(sk->sk_family, optname, data,
1006 read_unlock(&xfrm_km_lock);
1009 xfrm_sk_policy_insert(sk, err, pol);
1018 EXPORT_SYMBOL(xfrm_user_policy);
/* Register a key manager: append it to xfrm_km_list under the write
 * lock so notification broadcasts start reaching it. */
1020 int xfrm_register_km(struct xfrm_mgr *km)
1022 write_lock_bh(&xfrm_km_lock);
1023 list_add_tail(&km->list, &xfrm_km_list);
1024 write_unlock_bh(&xfrm_km_lock);
1027 EXPORT_SYMBOL(xfrm_register_km);
/* Unregister a key manager: unlink it from xfrm_km_list under the
 * write lock. */
1029 int xfrm_unregister_km(struct xfrm_mgr *km)
1031 write_lock_bh(&xfrm_km_lock);
1032 list_del(&km->list);
1033 write_unlock_bh(&xfrm_km_lock);
1036 EXPORT_SYMBOL(xfrm_unregister_km);
/* Register an address-family operations table.  Validates the pointer
 * and family range, refuses a duplicate registration for the family,
 * hands the afinfo pointers to the shared bydst/byspi tables, and
 * publishes it in xfrm_state_afinfo[] under the write lock. */
1038 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1041 if (unlikely(afinfo == NULL))
1043 if (unlikely(afinfo->family >= NPROTO))
1044 return -EAFNOSUPPORT;
1045 write_lock(&xfrm_state_afinfo_lock);
1046 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1049 afinfo->state_bydst = xfrm_state_bydst;
1050 afinfo->state_byspi = xfrm_state_byspi;
1051 xfrm_state_afinfo[afinfo->family] = afinfo;
1053 write_unlock(&xfrm_state_afinfo_lock);
1056 EXPORT_SYMBOL(xfrm_state_register_afinfo);
/* Unregister an address-family table: verify the registered entry is
 * the caller's, then clear the slot and the afinfo's table pointers
 * under the write lock. */
1058 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1061 if (unlikely(afinfo == NULL))
1063 if (unlikely(afinfo->family >= NPROTO))
1064 return -EAFNOSUPPORT;
1065 write_lock(&xfrm_state_afinfo_lock);
1066 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1067 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1070 xfrm_state_afinfo[afinfo->family] = NULL;
1071 afinfo->state_byspi = NULL;
1072 afinfo->state_bydst = NULL;
1075 write_unlock(&xfrm_state_afinfo_lock);
1078 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
/* Look up the afinfo for a family and, if found, take its per-afinfo
 * read lock before dropping the table lock — the nested lock keeps the
 * afinfo pinned until xfrm_state_put_afinfo releases it.  Returns NULL
 * for out-of-range or unregistered families. */
1080 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
1082 struct xfrm_state_afinfo *afinfo;
1083 if (unlikely(family >= NPROTO))
1085 read_lock(&xfrm_state_afinfo_lock);
1086 afinfo = xfrm_state_afinfo[family];
1087 if (likely(afinfo != NULL))
1088 read_lock(&afinfo->lock);
1089 read_unlock(&xfrm_state_afinfo_lock);
/* Release the per-afinfo lock taken by xfrm_state_get_afinfo;
 * NULL-safe so callers can pass a failed lookup straight through. */
1093 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1095 if (unlikely(afinfo == NULL))
1097 read_unlock(&afinfo->lock);
1100 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/* Drop x's reference on its tunnel state: if we hold the last user
 * reference besides the tunnel's own (count == 2), delete the tunnel
 * state, then decrement its tunnel_users count. */
1101 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1104 struct xfrm_state *t = x->tunnel;
1106 if (atomic_read(&t->tunnel_users) == 2)
1107 xfrm_state_delete(t);
1108 atomic_dec(&t->tunnel_users);
1113 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1116 * This function is NOT optimal. For example, with ESP it will give an
1117 * MTU that's usually two bytes short of being optimal. However, it will
1118 * usually give an answer that's a multiple of 4 provided the input is
1119 * also a multiple of 4.
/* Compute the usable payload MTU through this transform: subtract the
 * transform header from the link MTU, then (under x->lock, for a VALID
 * state with a get_max_size hook) let the transform type round the
 * payload to its own framing and add the header back. */
1121 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1125 res -= x->props.header_len;
1133 spin_lock_bh(&x->lock);
1134 if (x->km.state == XFRM_STATE_VALID &&
1135 x->type && x->type->get_max_size)
1136 m = x->type->get_max_size(x, m);
1138 m += x->props.header_len;
1139 spin_unlock_bh(&x->lock);
1149 EXPORT_SYMBOL(xfrm_state_mtu);
/* Final initialization of a configured state: run the family-specific
 * init_flags hook (if any), bind the transform type for the state's
 * protocol (-EPROTONOSUPPORT if none), run the type's init_state, and
 * on success mark the state VALID. */
1151 int xfrm_init_state(struct xfrm_state *x)
1153 struct xfrm_state_afinfo *afinfo;
1154 int family = x->props.family;
1157 err = -EAFNOSUPPORT;
1158 afinfo = xfrm_state_get_afinfo(family);
1163 if (afinfo->init_flags)
1164 err = afinfo->init_flags(x);
1166 xfrm_state_put_afinfo(afinfo);
1171 err = -EPROTONOSUPPORT;
1172 x->type = xfrm_get_type(x->id.proto, family);
1173 if (x->type == NULL)
1176 err = x->type->init_state(x);
1180 x->km.state = XFRM_STATE_VALID;
1186 EXPORT_SYMBOL(xfrm_init_state);
/* Boot-time initialization: set up every bydst/byspi hash chain head
 * and wire the GC work item to xfrm_state_gc_task. */
1188 void __init xfrm_state_init(void)
1192 for (i=0; i<XFRM_DST_HSIZE; i++) {
1193 INIT_LIST_HEAD(&xfrm_state_bydst[i]);
1194 INIT_LIST_HEAD(&xfrm_state_byspi[i]);
1196 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);