net/ipv6/xfrm6_tunnel.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C)2003,2004 USAGI/WIDE Project
 *
 * Authors      Mitsuru KANDA  <mk@linux-ipv6.org>
 *              YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 * Based on net/ipv4/xfrm4_tunnel.c
 */
#include <linux/module.h>
#include <linux/xfrm.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ipv6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/mutex.h>
#include <net/netns/generic.h>

#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256

#define XFRM6_TUNNEL_SPI_MIN    1
#define XFRM6_TUNNEL_SPI_MAX    0xffffffff

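/* Per-netns state: the two SPI hash tables (keyed by tunnel source
 * address and by SPI value) and the most recently allocated SPI, which
 * is used as the starting point for the next allocation.
 */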
struct xfrm6_tunnel_net {
        struct hlist_head spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
        struct hlist_head spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
        u32 spi;
};

static unsigned int xfrm6_tunnel_net_id __read_mostly;
static inline struct xfrm6_tunnel_net *xfrm6_tunnel_pernet(struct net *net)
{
        return net_generic(net, xfrm6_tunnel_net_id);
}

/*
 * xfrm6_tunnel_spi entries allocate a unique id ("spi")
 * per xfrm_address_t.
 */
struct xfrm6_tunnel_spi {
        struct hlist_node       list_byaddr;
        struct hlist_node       list_byspi;
        xfrm_address_t          addr;
        u32                     spi;
        refcount_t              refcnt;
        struct rcu_head         rcu_head;
};

static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock);

static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;

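/* Hash an IPv6 address into the spi_byaddr table: fold the 32-bit
 * ipv6_addr_hash() value down to the table index range.
 */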
static inline unsigned int xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr)
{
        unsigned int h;

        h = ipv6_addr_hash((const struct in6_addr *)addr);
        h ^= h >> 16;
        h ^= h >> 8;
        h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1;

        return h;
}

static inline unsigned int xfrm6_tunnel_spi_hash_byspi(u32 spi)
{
        return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
}

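/* Find the SPI entry for @saddr.  Callers must hold either the RCU read
 * lock or xfrm6_tunnel_spi_lock; the lockdep expression below covers
 * the non-RCU callers.
 */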
static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr)
{
        struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
        struct xfrm6_tunnel_spi *x6spi;

        hlist_for_each_entry_rcu(x6spi,
                             &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
                             list_byaddr, lockdep_is_held(&xfrm6_tunnel_spi_lock)) {
                if (xfrm6_addr_equal(&x6spi->addr, saddr))
                        return x6spi;
        }

        return NULL;
}

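/* Look up the SPI bound to @saddr; returns it in network byte order,
 * or 0 if no entry exists.
 */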
__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr)
{
        struct xfrm6_tunnel_spi *x6spi;
        u32 spi;

        rcu_read_lock_bh();
        x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
        spi = x6spi ? x6spi->spi : 0;
        rcu_read_unlock_bh();
        return htonl(spi);
}
EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup);

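/* Return the spi_byspi hash index if @spi is not yet in use in this
 * netns, or -1 if it is already taken.
 */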
static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi)
{
        struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
        struct xfrm6_tunnel_spi *x6spi;
        int index = xfrm6_tunnel_spi_hash_byspi(spi);

        hlist_for_each_entry(x6spi,
                             &xfrm6_tn->spi_byspi[index],
                             list_byspi) {
                if (x6spi->spi == spi)
                        return -1;
        }
        return index;
}

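/* Allocate a new SPI for @saddr: scan upwards from the last allocated
 * value, wrapping around to XFRM6_TUNNEL_SPI_MIN if needed, then insert
 * the new entry into both hash tables.  Returns 0 if the SPI space is
 * exhausted or the allocation fails.  Called with xfrm6_tunnel_spi_lock
 * held.
 */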
static u32 __xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
{
        struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
        u32 spi;
        struct xfrm6_tunnel_spi *x6spi;
        int index;

        if (xfrm6_tn->spi < XFRM6_TUNNEL_SPI_MIN ||
            xfrm6_tn->spi >= XFRM6_TUNNEL_SPI_MAX)
                xfrm6_tn->spi = XFRM6_TUNNEL_SPI_MIN;
        else
                xfrm6_tn->spi++;

        for (spi = xfrm6_tn->spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
                index = __xfrm6_tunnel_spi_check(net, spi);
                if (index >= 0)
                        goto alloc_spi;

                if (spi == XFRM6_TUNNEL_SPI_MAX)
                        break;
        }
        for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tn->spi; spi++) {
                index = __xfrm6_tunnel_spi_check(net, spi);
                if (index >= 0)
                        goto alloc_spi;
        }
        spi = 0;
        goto out;
alloc_spi:
        xfrm6_tn->spi = spi;
        x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
        if (!x6spi)
                goto out;

        memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
        x6spi->spi = spi;
        refcount_set(&x6spi->refcnt, 1);

        hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tn->spi_byspi[index]);

        index = xfrm6_tunnel_spi_hash_byaddr(saddr);
        hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tn->spi_byaddr[index]);
out:
        return spi;
}

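/* Get the SPI to use for @saddr: take a reference on an existing entry
 * or allocate a new one.  Returns the SPI in network byte order, or 0
 * on allocation failure.
 */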
__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
{
        struct xfrm6_tunnel_spi *x6spi;
        u32 spi;

        spin_lock_bh(&xfrm6_tunnel_spi_lock);
        x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
        if (x6spi) {
                refcount_inc(&x6spi->refcnt);
                spi = x6spi->spi;
        } else
                spi = __xfrm6_tunnel_alloc_spi(net, saddr);
        spin_unlock_bh(&xfrm6_tunnel_spi_lock);

        return htonl(spi);
}
EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi);

static void x6spi_destroy_rcu(struct rcu_head *head)
{
        kmem_cache_free(xfrm6_tunnel_spi_kmem,
                        container_of(head, struct xfrm6_tunnel_spi, rcu_head));
}

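/* Drop a reference on the SPI entry bound to @saddr; when the last
 * reference goes away, unhash the entry and free it after an RCU grace
 * period.
 */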
static void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr)
{
        struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
        struct xfrm6_tunnel_spi *x6spi;
        struct hlist_node *n;

        spin_lock_bh(&xfrm6_tunnel_spi_lock);

        hlist_for_each_entry_safe(x6spi, n,
                                  &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
                                  list_byaddr)
        {
                if (xfrm6_addr_equal(&x6spi->addr, saddr)) {
                        if (refcount_dec_and_test(&x6spi->refcnt)) {
                                hlist_del_rcu(&x6spi->list_byaddr);
                                hlist_del_rcu(&x6spi->list_byspi);
                                call_rcu(&x6spi->rcu_head, x6spi_destroy_rcu);
                                break;
                        }
                }
        }
        spin_unlock_bh(&xfrm6_tunnel_spi_lock);
}

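/* IPv6-in-IPv6 needs no protocol-specific transform on output; just
 * point skb->data back at the outer network header that tunnel mode
 * has already built.
 */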
static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
{
        skb_push(skb, -skb_network_offset(skb));
        return 0;
}

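/* No protocol-specific processing on input either; return the next
 * header value so the xfrm input path can keep parsing.
 */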
static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
        return skb_network_header(skb)[IP6CB(skb)->nhoff];
}

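/* Receive hook for the tunnel: map the outer source address back to its
 * SPI and hand the packet to the generic xfrm receive path.
 */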
static int xfrm6_tunnel_rcv(struct sk_buff *skb)
{
        struct net *net = dev_net(skb->dev);
        const struct ipv6hdr *iph = ipv6_hdr(skb);
        __be32 spi;

        spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr);
        return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi, NULL);
}

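/* ICMPv6 error handler.  The switch below enumerates the error types
 * that could be relevant to the tunnel, but no action is taken for any
 * of them at present.
 */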
static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                            u8 type, u8 code, int offset, __be32 info)
{
        /* xfrm6_tunnel native err handling */
        switch (type) {
        case ICMPV6_DEST_UNREACH:
                switch (code) {
                case ICMPV6_NOROUTE:
                case ICMPV6_ADM_PROHIBITED:
                case ICMPV6_NOT_NEIGHBOUR:
                case ICMPV6_ADDR_UNREACH:
                case ICMPV6_PORT_UNREACH:
                default:
                        break;
                }
                break;
        case ICMPV6_PKT_TOOBIG:
                break;
        case ICMPV6_TIME_EXCEED:
                switch (code) {
                case ICMPV6_EXC_HOPLIMIT:
                        break;
                case ICMPV6_EXC_FRAGTIME:
                default:
                        break;
                }
                break;
        case ICMPV6_PARAMPROB:
                switch (code) {
                case ICMPV6_HDR_FIELD: break;
                case ICMPV6_UNK_NEXTHDR: break;
                case ICMPV6_UNK_OPTION: break;
                }
                break;
        default:
                break;
        }

        return 0;
}

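/* An xfrm6_tunnel state must be in tunnel mode and cannot use
 * encapsulation; the only header overhead is the outer IPv6 header.
 */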
static int xfrm6_tunnel_init_state(struct xfrm_state *x)
{
        if (x->props.mode != XFRM_MODE_TUNNEL)
                return -EINVAL;

        if (x->encap)
                return -EINVAL;

        x->props.header_len = sizeof(struct ipv6hdr);

        return 0;
}

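/* Release the SPI bound to this state's source address. */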
static void xfrm6_tunnel_destroy(struct xfrm_state *x)
{
        struct net *net = xs_net(x);

        xfrm6_tunnel_free_spi(net, (xfrm_address_t *)&x->props.saddr);
}

static const struct xfrm_type xfrm6_tunnel_type = {
        .description    = "IP6IP6",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_IPV6,
        .init_state     = xfrm6_tunnel_init_state,
        .destructor     = xfrm6_tunnel_destroy,
        .input          = xfrm6_tunnel_input,
        .output         = xfrm6_tunnel_output,
};

static struct xfrm6_tunnel xfrm6_tunnel_handler __read_mostly = {
        .handler        = xfrm6_tunnel_rcv,
        .err_handler    = xfrm6_tunnel_err,
        .priority       = 3,
};

static struct xfrm6_tunnel xfrm46_tunnel_handler __read_mostly = {
        .handler        = xfrm6_tunnel_rcv,
        .err_handler    = xfrm6_tunnel_err,
        .priority       = 3,
};

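/* Initialise the per-netns hash tables and reset the SPI counter. */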
static int __net_init xfrm6_tunnel_net_init(struct net *net)
{
        struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
        unsigned int i;

        for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
                INIT_HLIST_HEAD(&xfrm6_tn->spi_byaddr[i]);
        for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
                INIT_HLIST_HEAD(&xfrm6_tn->spi_byspi[i]);
        xfrm6_tn->spi = 0;

        return 0;
}

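/* On netns teardown, flush any remaining xfrm states and verify that
 * both hash tables have been emptied.
 */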
static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
{
        struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
        unsigned int i;

        xfrm_flush_gc();
        xfrm_state_flush(net, 0, false, true);

        for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
                WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));

        for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
                WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byspi[i]));
}

static struct pernet_operations xfrm6_tunnel_net_ops = {
        .init   = xfrm6_tunnel_net_init,
        .exit   = xfrm6_tunnel_net_exit,
        .id     = &xfrm6_tunnel_net_id,
        .size   = sizeof(struct xfrm6_tunnel_net),
};

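/* Module init: create the SPI slab cache, then register the per-netns
 * ops, the IPPROTO_IPV6 xfrm type and the tunnel handlers for both the
 * IPv6 and IPv4 payload cases, unwinding in reverse order on failure.
 */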
static int __init xfrm6_tunnel_init(void)
{
        int rv;

        xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
                                                  sizeof(struct xfrm6_tunnel_spi),
                                                  0, SLAB_HWCACHE_ALIGN,
                                                  NULL);
        if (!xfrm6_tunnel_spi_kmem)
                return -ENOMEM;
        rv = register_pernet_subsys(&xfrm6_tunnel_net_ops);
        if (rv < 0)
                goto out_pernet;
        rv = xfrm_register_type(&xfrm6_tunnel_type, AF_INET6);
        if (rv < 0)
                goto out_type;
        rv = xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6);
        if (rv < 0)
                goto out_xfrm6;
        rv = xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET);
        if (rv < 0)
                goto out_xfrm46;
        return 0;

out_xfrm46:
        xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
out_xfrm6:
        xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
out_type:
        unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
out_pernet:
        kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
        return rv;
}

static void __exit xfrm6_tunnel_fini(void)
{
        xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
        xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
        xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
        unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
        /* Someone may still hold a reference to an xfrm6_tunnel_spi,
         * so wait for the outstanding RCU callbacks to run before
         * destroying the cache.
         */
        rcu_barrier();
        kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
}

module_init(xfrm6_tunnel_init);
module_exit(xfrm6_tunnel_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_IPV6);