// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
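/* Transport mode: advance skb->data past the network headers and the
 * ESP header (x->props.header_len) when IPsec is handled at layer 2;
 * a GSO segment first has its transport header pulled back by the ESP
 * header length that was accounted during segmentation.
 */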
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
                                  unsigned int hsize)
{
        struct xfrm_offload *xo = xfrm_offload(skb);

        skb_reset_mac_len(skb);
        if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header -= x->props.header_len;

        pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}
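
/* Tunnel mode: a GSO segment's transport header starts right behind
 * the new outer IP header of hsize bytes; skb->data is advanced past
 * the MAC header plus the complete IPsec encapsulation
 * (x->props.header_len).
 */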
static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
                                    unsigned int hsize)
{
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header = skb->network_header + hsize;

        skb_reset_mac_len(skb);
        pskb_pull(skb, skb->mac_len + x->props.header_len);
}
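
/* BEET mode: as tunnel mode, but x->props.header_len must be corrected
 * for the IPv4 BEET pseudo header and, for IPv4-in-IPv6, for the size
 * difference between the inner and outer IP headers before advancing
 * skb->data.
 */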
static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
                                  unsigned int hsize)
{
        struct xfrm_offload *xo = xfrm_offload(skb);
        int phlen = 0;

        if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header = skb->network_header + hsize;

        skb_reset_mac_len(skb);
        if (x->sel.family != AF_INET6) {
                phlen = IPV4_BEET_PHMAXLEN;
                if (x->outer_mode.family == AF_INET6)
                        phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
        }

        pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
}

/* Adjust pointers into the packet when IPsec is done at layer2 */
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
        switch (x->outer_mode.encap) {
        case XFRM_MODE_TUNNEL:
                if (x->outer_mode.family == AF_INET)
                        return __xfrm_mode_tunnel_prep(x, skb,
                                                       sizeof(struct iphdr));
                if (x->outer_mode.family == AF_INET6)
                        return __xfrm_mode_tunnel_prep(x, skb,
                                                       sizeof(struct ipv6hdr));
                break;
        case XFRM_MODE_TRANSPORT:
                if (x->outer_mode.family == AF_INET)
                        return __xfrm_transport_prep(x, skb,
                                                     sizeof(struct iphdr));
                if (x->outer_mode.family == AF_INET6)
                        return __xfrm_transport_prep(x, skb,
                                                     sizeof(struct ipv6hdr));
                break;
        case XFRM_MODE_BEET:
                if (x->outer_mode.family == AF_INET)
                        return __xfrm_mode_beet_prep(x, skb,
                                                     sizeof(struct iphdr));
                if (x->outer_mode.family == AF_INET6)
                        return __xfrm_mode_beet_prep(x, skb,
                                                     sizeof(struct ipv6hdr));
                break;
        case XFRM_MODE_ROUTEOPTIMIZATION:
        case XFRM_MODE_IN_TRIGGER:
                break;
        }
}
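
/* validate_xmit_xfrm() is called from the core transmit path
 * (validate_xmit_skb()) for packets that carry a secpath.  It decides
 * whether the skb can be handed to the ESP-offloading device as is,
 * segments it in software when it was rerouted to a device that cannot
 * handle GSO, and performs the ESP transformation via
 * x->type_offload->xmit().  A non-empty per-CPU backlog sets *again
 * and defers the skb so that ordering with already-queued packets is
 * preserved.
 */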
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
        int err;
        unsigned long flags;
        struct xfrm_state *x;
        struct softnet_data *sd;
        struct sk_buff *skb2, *nskb, *pskb = NULL;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct net_device *dev = skb->dev;
        struct sec_path *sp;

        if (!xo || (xo->flags & XFRM_XMIT))
                return skb;

        if (!(features & NETIF_F_HW_ESP))
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

        sp = skb_sec_path(skb);
        x = sp->xvec[sp->len - 1];
        if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
                return skb;

        /* This skb was already validated on the upper/virtual dev */
        if ((x->xso.dev != dev) && (x->xso.real_dev == dev))
                return skb;

        local_irq_save(flags);
        sd = this_cpu_ptr(&softnet_data);
        err = !skb_queue_empty(&sd->xfrm_backlog);
        local_irq_restore(flags);

        if (err) {
                *again = true;
                return skb;
        }

        xo->flags |= XFRM_XMIT;

        if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) {
                struct sk_buff *segs;

                /* Packet got rerouted, fixup features and segment it. */
                esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP);

                segs = skb_gso_segment(skb, esp_features);
                if (IS_ERR(segs)) {
                        kfree_skb(skb);
                        atomic_long_inc(&dev->tx_dropped);
                        return NULL;
                } else {
                        consume_skb(skb);
                        skb = segs;
                }
        }

        if (!skb->next) {
                esp_features |= skb->dev->gso_partial_features;
                xfrm_outer_mode_prep(x, skb);

                xo->flags |= XFRM_DEV_RESUME;

                err = x->type_offload->xmit(x, skb, esp_features);
                if (err) {
                        if (err == -EINPROGRESS)
                                return NULL;

                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        kfree_skb(skb);
                        return NULL;
                }

                skb_push(skb, skb->data - skb_mac_header(skb));

                return skb;
        }

        skb_list_walk_safe(skb, skb2, nskb) {
                esp_features |= skb->dev->gso_partial_features;
                skb_mark_not_on_list(skb2);

                xo = xfrm_offload(skb2);
                xo->flags |= XFRM_DEV_RESUME;

                xfrm_outer_mode_prep(x, skb2);

                err = x->type_offload->xmit(x, skb2, esp_features);
                if (!err) {
                        skb2->next = nskb;
                } else if (err != -EINPROGRESS) {
                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        skb2->next = nskb;
                        kfree_skb_list(skb2);
                        return NULL;
                } else {
                        if (skb == skb2)
                                skb = nskb;
                        else
                                pskb->next = nskb;

                        continue;
                }

                skb_push(skb2, skb2->data - skb_mac_header(skb2));
                pskb = skb2;
        }

        return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
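
/* Set up hardware offload of @x on a NIC.  Reached when userspace asks
 * for offload of a state (the XFRMA_OFFLOAD_DEV netlink attribute):
 * the device is taken from xuo->ifindex or, failing that, from a route
 * lookup on the state's addresses, and its xdo_dev_state_add() callback
 * is invoked.  A return of 0 with xso->dev still NULL means no offload
 * is available and the stack falls back to software IPsec.
 */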
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
                       struct xfrm_user_offload *xuo)
{
        int err;
        struct dst_entry *dst;
        struct net_device *dev;
        struct xfrm_state_offload *xso = &x->xso;
        xfrm_address_t *saddr;
        xfrm_address_t *daddr;

        if (!x->type_offload)
                return -EINVAL;

        /* We don't yet support UDP encapsulation or TFC padding. */
        if (x->encap || x->tfcpad)
                return -EINVAL;

        dev = dev_get_by_index(net, xuo->ifindex);
        if (!dev) {
                if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
                        saddr = &x->props.saddr;
                        daddr = &x->id.daddr;
                } else {
                        saddr = &x->id.daddr;
                        daddr = &x->props.saddr;
                }

                dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
                                        x->props.family,
                                        xfrm_smark_get(0, x));
                if (IS_ERR(dst))
                        return 0;

                dev = dst->dev;

                dev_hold(dev);
                dst_release(dst);
        }

        if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
                xso->dev = NULL;
                dev_put(dev);
                return 0;
        }

        if (x->props.flags & XFRM_STATE_ESN &&
            !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
                xso->dev = NULL;
                dev_put(dev);
                return -EINVAL;
        }

        xso->dev = dev;
        xso->real_dev = dev;
        xso->num_exthdrs = 1;
        xso->flags = xuo->flags;

        err = dev->xfrmdev_ops->xdo_dev_state_add(x);
        if (err) {
                xso->num_exthdrs = 0;
                xso->flags = 0;
                xso->dev = NULL;
                dev_put(dev);

                if (err != -EOPNOTSUPP)
                        return err;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);
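
/* Decide whether @skb may use the offload path for @x: the route must
 * end at the offloading device with no nested xfrm bundle, and the
 * packet must fit the state's MTU (or be a GSO packet whose segments
 * do).  The driver gets a final veto through its optional
 * xdo_dev_offload_ok() callback.
 */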
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
        int mtu;
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
        struct net_device *dev = x->xso.dev;

        if (!x->type_offload || x->encap)
                return false;

        if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
            (!xdst->child->xfrm)) {
                mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
                if (skb->len <= mtu)
                        goto ok;

                if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
                        goto ok;
        }

        return false;

ok:
        if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
                return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

        return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
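
/* Resume transmission of an skb after asynchronous processing has
 * finished: try to send it on the device queue directly; if the queue
 * is frozen or stopped, park the skb on the per-CPU xfrm backlog and
 * raise the TX softirq so that it is retried from net_tx_action().
 */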
void xfrm_dev_resume(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        int ret = NETDEV_TX_BUSY;
        struct netdev_queue *txq;
        struct softnet_data *sd;
        unsigned long flags;

        rcu_read_lock();
        txq = netdev_core_pick_tx(dev, skb, NULL);

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_stopped(txq))
                skb = dev_hard_start_xmit(skb, dev, txq, &ret);
        HARD_TX_UNLOCK(dev, txq);

        if (!dev_xmit_complete(ret)) {
                local_irq_save(flags);
                sd = this_cpu_ptr(&softnet_data);
                skb_queue_tail(&sd->xfrm_backlog, skb);
                raise_softirq_irqoff(NET_TX_SOFTIRQ);
                local_irq_restore(flags);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);
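
/* Flush the per-CPU xfrm backlog, called from net_tx_action() in
 * softirq context: splice the queued skbs to a private list under the
 * queue lock, then retransmit them one by one via xfrm_dev_resume().
 */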
void xfrm_dev_backlog(struct softnet_data *sd)
{
        struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
        struct sk_buff_head list;
        struct sk_buff *skb;

        if (skb_queue_empty(xfrm_backlog))
                return;

        __skb_queue_head_init(&list);

        spin_lock(&xfrm_backlog->lock);
        skb_queue_splice_init(xfrm_backlog, &list);
        spin_unlock(&xfrm_backlog->lock);

        while (!skb_queue_empty(&list)) {
                skb = __skb_dequeue(&list);
                xfrm_dev_resume(skb);
        }
}
#endif
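
/* Sanity-check a device's advertised ESP offload features: ESP
 * checksum offload requires full ESP offload, and full ESP offload
 * requires the state add/delete callbacks.  Without
 * CONFIG_XFRM_OFFLOAD, any ESP feature flag is rejected.
 */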
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
        if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
            !(dev->features & NETIF_F_HW_ESP))
                return NOTIFY_BAD;

        if ((dev->features & NETIF_F_HW_ESP) &&
            (!(dev->xfrmdev_ops &&
               dev->xfrmdev_ops->xdo_dev_state_add &&
               dev->xfrmdev_ops->xdo_dev_state_delete)))
                return NOTIFY_BAD;
#else
        if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
                return NOTIFY_BAD;
#endif

        return NOTIFY_DONE;
}
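
/* Netdev notifier helpers: validate the offload API on register and
 * feature change; on down or unregister, flush all states that were
 * offloaded to the device.
 */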
static int xfrm_dev_register(struct net_device *dev)
{
        return xfrm_api_check(dev);
}

static int xfrm_dev_feat_change(struct net_device *dev)
{
        return xfrm_api_check(dev);
}

static int xfrm_dev_down(struct net_device *dev)
{
        if (dev->features & NETIF_F_HW_ESP)
                xfrm_dev_state_flush(dev_net(dev), dev, true);

        return NOTIFY_DONE;
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_REGISTER:
                return xfrm_dev_register(dev);

        case NETDEV_FEAT_CHANGE:
                return xfrm_dev_feat_change(dev);

        case NETDEV_DOWN:
        case NETDEV_UNREGISTER:
                return xfrm_dev_down(dev);
        }
        return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
        .notifier_call  = xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
        register_netdevice_notifier(&xfrm_dev_notifier);
}