/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
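/* Pull the skb data pointer past the MAC header, the outer L3 header
 * (hsize) and the IPsec header (x->props.header_len) so only the transport
 * payload remains.  For GSO segments, rewind the transport header so it
 * points at the ESP header again.
 */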
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb_reset_mac_len(skb);
	pskb_pull(skb, skb->mac_len + hsize + x->props.header_len);

	if (xo->flags & XFRM_GSO_SEGMENT) {
		skb_reset_transport_header(skb);
		skb->transport_header -= x->props.header_len;
	}
}
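
/* Tunnel mode variant: the encapsulated packet must stay intact, so only
 * the MAC header and the IPsec header are pulled.  For GSO segments, the
 * transport header is pointed just past the outer L3 header, i.e. at the
 * ESP header.
 */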
static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
				    unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	pskb_pull(skb, skb->mac_len + x->props.header_len);
}

/* Adjust pointers into the packet when IPsec is done at layer2 */
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_TRANSPORT:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_IN_TRIGGER:
		break;
	}
}
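
/* Called from the core TX path (validate_xmit_skb) right before the skb is
 * handed to the driver: apply the ESP transform in software where needed,
 * segmenting GSO skbs first if they cannot leave through the device the
 * state was offloaded to.
 */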
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	int err;
	unsigned long flags;
	struct xfrm_state *x;
	struct sk_buff *skb2;
	struct softnet_data *sd;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return skb;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
		return skb;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}
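
	/* GSO skbs can keep their offload only if they will leave through
	 * the device the state is bound to; rerouted skbs are segmented in
	 * software below.
	 */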
	if (skb_is_gso(skb)) {
		struct net_device *dev = skb->dev;

		if (unlikely(x->xso.dev != dev)) {
			struct sk_buff *segs;

			/* Packet got rerouted, fixup features and segment it. */
			esp_features = esp_features & ~(NETIF_F_HW_ESP
							| NETIF_F_GSO_ESP);

			segs = skb_gso_segment(skb, esp_features);
			if (IS_ERR(segs)) {
				kfree_skb(skb);
				atomic_long_inc(&dev->tx_dropped);
				return NULL;
			} else {
				consume_skb(skb);
				skb = segs;
			}
		}
	}
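
	/* Single skb: transform it inline.  On -EINPROGRESS the crypto layer
	 * has taken ownership and will resume transmission through
	 * xfrm_dev_resume() once the operation completes.
	 */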
	if (!skb->next) {
		esp_features |= skb->dev->gso_partial_features;
		xfrm_outer_mode_prep(x, skb);

		xo->flags |= XFRM_DEV_RESUME;

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			if (err == -EINPROGRESS)
				return NULL;

			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		return skb;
	}

	skb2 = skb;
	do {
		struct sk_buff *nskb = skb2->next;

		esp_features |= skb->dev->gso_partial_features;
		skb_mark_not_on_list(skb2);

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_DEV_RESUME;

		xfrm_outer_mode_prep(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (!err) {
			skb2->next = nskb;
		} else if (err != -EINPROGRESS) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		} else {
			if (skb == skb2)
				skb = nskb;
			if (!skb)
				return NULL;
			goto skip_push;
		}

		skb_push(skb2, skb2->data - skb_mac_header(skb2));
skip_push:
		skb2 = nskb;
	} while (skb2);

	return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
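
/* For context, a minimal sketch of the driver side that
 * xfrm_dev_state_add() below talks to.  The "foo" names are hypothetical;
 * the xfrmdev_ops member names and the feature flag are the real API:
 *
 *	static const struct xfrmdev_ops foo_xfrmdev_ops = {
 *		.xdo_dev_state_add	= foo_xdo_dev_state_add,
 *		.xdo_dev_state_delete	= foo_xdo_dev_state_delete,
 *		.xdo_dev_state_free	= foo_xdo_dev_state_free,
 *		.xdo_dev_offload_ok	= foo_xdo_dev_offload_ok,
 *		.xdo_dev_state_advance_esn = foo_xdo_dev_state_advance_esn,
 *	};
 *
 *	netdev->xfrmdev_ops = &foo_xfrmdev_ops;
 *	netdev->features |= NETIF_F_HW_ESP;
 */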
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo)
{
	int err;
	struct dst_entry *dst;
	struct net_device *dev;
	struct xfrm_state_offload *xso = &x->xso;
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;

	if (!x->type_offload)
		return -EINVAL;

	/* We don't yet support UDP encapsulation and TFC padding. */
	if (x->encap || x->tfcpad)
		return -EINVAL;

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev) {
		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
			saddr = &x->props.saddr;
			daddr = &x->id.daddr;
		} else {
			saddr = &x->id.daddr;
			daddr = &x->props.saddr;
		}

		dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
					x->props.family,
					xfrm_smark_get(0, x));
		if (IS_ERR(dst))
			return 0;

		dev = dst->dev;
		dev_hold(dev);
		dst_release(dst);
	}

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
		xso->dev = NULL;
		dev_put(dev);
		return 0;
	}

	if (x->props.flags & XFRM_STATE_ESN &&
	    !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
		xso->dev = NULL;
		dev_put(dev);
		return -EINVAL;
	}

	xso->dev = dev;
	xso->num_exthdrs = 1;
	xso->flags = xuo->flags;

	err = dev->xfrmdev_ops->xdo_dev_state_add(x);
	if (err) {
		xso->num_exthdrs = 0;
		xso->flags = 0;
		xso->dev = NULL;
		dev_put(dev);

		if (err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);
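
/* Userspace reaches xfrm_dev_state_add() via the XFRMA_OFFLOAD_DEV netlink
 * attribute (struct xfrm_user_offload carrying ifindex and flags).  With
 * iproute2 that corresponds to something like (addresses, key and device
 * name purely illustrative):
 *
 *	ip xfrm state add src 172.16.0.1 dst 172.16.0.2 \
 *		proto esp spi 0x1 reqid 1 mode transport \
 *		aead 'rfc4106(gcm(aes))' 0x0123...cdef 128 \
 *		offload dev eth0 dir out
 */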
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	int mtu;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct net_device *dev = x->xso.dev;

	if (!x->type_offload || x->encap)
		return false;

	if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
	    (!xdst->child->xfrm && x->type->get_mtu)) {
		mtu = x->type->get_mtu(x, xdst->child_mtu_cached);

		if (skb->len <= mtu)
			goto ok;

		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
			goto ok;
	}

	return false;

ok:
	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
		return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

	return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
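
/* Completion handler for asynchronously resumed packets (XFRM_DEV_RESUME):
 * try to transmit the skb directly on its device; if the TX queue is
 * frozen or the driver is busy, park the skb on the per-cpu xfrm_backlog
 * and raise NET_TX_SOFTIRQ to retry later.
 */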
void xfrm_dev_resume(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret = NETDEV_TX_BUSY;
	struct netdev_queue *txq;
	struct softnet_data *sd;
	unsigned long flags;

	rcu_read_lock();
	txq = netdev_core_pick_tx(dev, skb, NULL);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	if (!dev_xmit_complete(ret)) {
		local_irq_save(flags);
		sd = this_cpu_ptr(&softnet_data);
		skb_queue_tail(&sd->xfrm_backlog, skb);
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);
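
/* Invoked from the NET_TX softirq (net_tx_action()) to drain the per-cpu
 * backlog filled by xfrm_dev_resume(): splice the queue out under its
 * lock, then retransmit the skbs without holding it.
 */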
void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);
		xfrm_dev_resume(skb);
	}
}
#endif
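
/* Reject inconsistent offload advertisements: ESP TX checksum offload
 * requires ESP offload itself, and ESP offload requires the add/delete
 * state callbacks; without CONFIG_XFRM_OFFLOAD the feature bits must not
 * be set at all.
 */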
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
	    !(dev->features & NETIF_F_HW_ESP))
		return NOTIFY_BAD;

	if ((dev->features & NETIF_F_HW_ESP) &&
	    (!(dev->xfrmdev_ops &&
	       dev->xfrmdev_ops->xdo_dev_state_add &&
	       dev->xfrmdev_ops->xdo_dev_state_delete)))
		return NOTIFY_BAD;
#else
	if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
		return NOTIFY_BAD;
#endif

	return NOTIFY_DONE;
}

static int xfrm_dev_register(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

static int xfrm_dev_feat_change(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

static int xfrm_dev_down(struct net_device *dev)
{
	if (dev->features & NETIF_F_HW_ESP)
		xfrm_dev_state_flush(dev_net(dev), dev, true);

	return NOTIFY_DONE;
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		return xfrm_dev_register(dev);

	case NETDEV_FEAT_CHANGE:
		return xfrm_dev_feat_change(dev);

	case NETDEV_DOWN:
		return xfrm_dev_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call = xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
	register_netdevice_notifier(&xfrm_dev_notifier);
}