/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <crypto/internal/geniv.h>
#include <crypto/aead.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/module.h>

#include "en.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec_fs.h"

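/* The stack hands us an xfrm_state; the driver keeps its own per-SA
 * state (struct mlx5e_ipsec_sa_entry) and stashes a pointer to it in
 * x->xso.offload_handle when the state is added. This helper recovers
 * that pointer, returning NULL for states that were never offloaded.
 */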
static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa;

	if (!x || !x->xso.offload_handle)
		return NULL;

	sa = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
	WARN_ON(sa->x != x);
	return sa;
}

struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
					      unsigned int handle)
{
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct xfrm_state *ret = NULL;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->sadb_rx, sa_entry, hlist, handle)
		if (sa_entry->handle == handle) {
			ret = sa_entry->x;
			xfrm_state_hold(ret);
			break;
		}
	rcu_read_unlock();

	return ret;
}

static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry,
				   unsigned int handle)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_sa_entry *_sa_entry;
	unsigned long flags;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->sadb_rx, _sa_entry, hlist, handle)
		if (_sa_entry->handle == handle) {
			rcu_read_unlock();
			return -EEXIST;
		}
	rcu_read_unlock();

	spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
	sa_entry->handle = handle;
	hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
	spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);

	return 0;
}

static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	unsigned long flags;

	spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
	hash_del_rcu(&sa_entry->hlist);
	spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
}

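/* Track where the 32-bit sequence number sits inside the 64-bit ESN
 * space. The low sequence space is split in two halves around
 * MLX5E_IPSEC_ESN_SCOPE_MID: crossing the midpoint toggles the overlap
 * flag, and wrapping back below it with overlap set means the high 32
 * bits (seq_hi) have advanced. Returns true when the hardware context
 * needs to be updated with the new ESN state.
 */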
static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_replay_state_esn *replay_esn;
	u32 seq_bottom = 0;
	u8 overlap;
	u32 *esn;

	if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) {
		sa_entry->esn_state.trigger = 0;
		return false;
	}

	replay_esn = sa_entry->x->replay_esn;
	if (replay_esn->seq >= replay_esn->replay_window)
		seq_bottom = replay_esn->seq - replay_esn->replay_window + 1;

	overlap = sa_entry->esn_state.overlap;

	sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,
						    htonl(seq_bottom));
	esn = &sa_entry->esn_state.esn;

	sa_entry->esn_state.trigger = 1;
	if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
		++(*esn);
		sa_entry->esn_state.overlap = 0;
		return true;
	} else if (unlikely(!overlap &&
			    (seq_bottom >= MLX5E_IPSEC_ESN_SCOPE_MID))) {
		sa_entry->esn_state.overlap = 1;
		return true;
	}

	return false;
}

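/* Translate the stack's xfrm_state into the mlx5_accel_esp_xfrm_attrs
 * layout the accel layer expects: AES-GCM key material and salt split
 * out of the single xfrm AEAD key blob, ESN state, SPI, action
 * (encrypt/decrypt), encapsulation mode and addresses.
 */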
static void
mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
				   struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;
	struct aes_gcm_keymat *aes_gcm = &attrs->keymat.aes_gcm;
	struct aead_geniv_ctx *geniv_ctx;
	struct crypto_aead *aead;
	unsigned int crypto_data_len, key_len;
	int ivsize;

	memset(attrs, 0, sizeof(*attrs));

	/* key */
	crypto_data_len = (x->aead->alg_key_len + 7) / 8;
	key_len = crypto_data_len - 4; /* 4 bytes salt at end */

	memcpy(aes_gcm->aes_key, x->aead->alg_key, key_len);
	aes_gcm->key_len = key_len * 8;

	/* salt and seq_iv */
	aead = x->data;
	geniv_ctx = crypto_aead_ctx(aead);
	ivsize = crypto_aead_ivsize(aead);
	memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, ivsize);
	memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
	       sizeof(aes_gcm->salt));

	/* icv len */
	aes_gcm->icv_len = x->aead->alg_icv_len;

	/* esn */
	if (sa_entry->esn_state.trigger) {
		attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
		attrs->esn = sa_entry->esn_state.esn;
		if (sa_entry->esn_state.overlap)
			attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
	}

	/* rx handle */
	attrs->sa_handle = sa_entry->handle;

	/* algo type */
	attrs->keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;

	/* action */
	attrs->action = (!(x->xso.flags & XFRM_OFFLOAD_INBOUND)) ?
			MLX5_ACCEL_ESP_ACTION_ENCRYPT :
			MLX5_ACCEL_ESP_ACTION_DECRYPT;

	/* flags */
	attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
			MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
			MLX5_ACCEL_ESP_FLAGS_TUNNEL;

	/* spi */
	attrs->spi = x->id.spi;

	/* source and destination IPs */
	memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
	memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
	attrs->is_ipv6 = (x->props.family != AF_INET);
}

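/* Reject anything the hardware cannot handle. Only plain ESP with
 * AES-GCM (128-bit ICV, 128/256-bit key plus 4-byte salt, seqiv IV
 * generator) in transport or tunnel mode qualifies; separate
 * authentication, compression, UDP encapsulation and TFC padding do
 * not. ESN and IPv6 additionally depend on device capability bits.
 */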
static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
{
	struct net_device *netdev = x->xso.real_dev;
	struct mlx5e_priv *priv;

	priv = netdev_priv(netdev);

	if (x->props.aalgo != SADB_AALG_NONE) {
		netdev_info(netdev, "Cannot offload authenticated xfrm states\n");
		return -EINVAL;
	}
	if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
		netdev_info(netdev, "Only AES-GCM-ICV16 xfrm state may be offloaded\n");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		netdev_info(netdev, "Cannot offload compressed xfrm states\n");
		return -EINVAL;
	}
	if (x->props.flags & XFRM_STATE_ESN &&
	    !(mlx5_accel_ipsec_device_caps(priv->mdev) &
	      MLX5_ACCEL_IPSEC_CAP_ESN)) {
		netdev_info(netdev, "Cannot offload ESN xfrm states\n");
		return -EINVAL;
	}
	if (x->props.family != AF_INET &&
	    x->props.family != AF_INET6) {
		netdev_info(netdev, "Only IPv4/6 xfrm states may be offloaded\n");
		return -EINVAL;
	}
	if (x->props.mode != XFRM_MODE_TRANSPORT &&
	    x->props.mode != XFRM_MODE_TUNNEL) {
		netdev_info(netdev, "Only transport and tunnel xfrm states may be offloaded\n");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		netdev_info(netdev, "Only ESP xfrm state may be offloaded\n");
		return -EINVAL;
	}
	if (x->encap) {
		netdev_info(netdev, "Encapsulated xfrm state may not be offloaded\n");
		return -EINVAL;
	}
	if (!x->aead) {
		netdev_info(netdev, "Cannot offload xfrm states without aead\n");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128) {
		netdev_info(netdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
		return -EINVAL;
	}
	if ((x->aead->alg_key_len != 128 + 32) &&
	    (x->aead->alg_key_len != 256 + 32)) {
		netdev_info(netdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
		return -EINVAL;
	}
	if (x->tfcpad) {
		netdev_info(netdev, "Cannot offload xfrm states with tfc padding\n");
		return -EINVAL;
	}
	if (!x->geniv) {
		netdev_info(netdev, "Cannot offload xfrm states without geniv\n");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
		return -EINVAL;
	}
	if (x->props.family == AF_INET6 &&
	    !(mlx5_accel_ipsec_device_caps(priv->mdev) &
	      MLX5_ACCEL_IPSEC_CAP_IPV6)) {
		netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n");
		return -EINVAL;
	}

	return 0;
}

static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv,
				  struct mlx5e_ipsec_sa_entry *sa_entry)
{
	if (!mlx5_is_ipsec_device(priv->mdev))
		return 0;

	return mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->xfrm->attrs,
					     sa_entry->ipsec_obj_id,
					     &sa_entry->ipsec_rule);
}

static void mlx5e_xfrm_fs_del_rule(struct mlx5e_priv *priv,
				   struct mlx5e_ipsec_sa_entry *sa_entry)
{
	if (!mlx5_is_ipsec_device(priv->mdev))
		return;

	mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs,
				      &sa_entry->ipsec_rule);
}

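/* xdo_dev_state_add: validate the state, then build the offload in
 * order: software SA entry, accel xfrm context, hardware context,
 * steering rule. Finally publish it, either into the RX SADB hashtable
 * (inbound) or by picking the TX IV-setting op (outbound). Each
 * failure unwinds exactly the steps already taken.
 */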
static int mlx5e_xfrm_add_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
	struct net_device *netdev = x->xso.real_dev;
	struct mlx5_accel_esp_xfrm_attrs attrs;
	struct mlx5e_priv *priv;
	unsigned int sa_handle;
	int err;

	priv = netdev_priv(netdev);

	err = mlx5e_xfrm_validate_state(x);
	if (err)
		return err;

	sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
	if (!sa_entry) {
		err = -ENOMEM;
		goto out;
	}

	sa_entry->x = x;
	sa_entry->ipsec = priv->ipsec;

	/* check esn */
	mlx5e_ipsec_update_esn_state(sa_entry);

	/* create xfrm */
	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
	sa_entry->xfrm =
		mlx5_accel_esp_create_xfrm(priv->mdev, &attrs,
					   MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA);
	if (IS_ERR(sa_entry->xfrm)) {
		err = PTR_ERR(sa_entry->xfrm);
		goto err_sa_entry;
	}

	/* create hw context */
	sa_entry->hw_context =
		mlx5_accel_esp_create_hw_context(priv->mdev,
						 sa_entry->xfrm,
						 &sa_handle);
	if (IS_ERR(sa_entry->hw_context)) {
		err = PTR_ERR(sa_entry->hw_context);
		goto err_xfrm;
	}

	sa_entry->ipsec_obj_id = sa_handle;
	err = mlx5e_xfrm_fs_add_rule(priv, sa_entry);
	if (err)
		goto err_hw_ctx;

	if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
		err = mlx5e_ipsec_sadb_rx_add(sa_entry, sa_handle);
		if (err)
			goto err_add_rule;
	} else {
		sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
				mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
	}

	x->xso.offload_handle = (unsigned long)sa_entry;
	goto out;

err_add_rule:
	mlx5e_xfrm_fs_del_rule(priv, sa_entry);
err_hw_ctx:
	mlx5_accel_esp_free_hw_context(priv->mdev, sa_entry->hw_context);
err_xfrm:
	mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
err_sa_entry:
	kfree(sa_entry);

out:
	return err;
}

static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);

	if (!sa_entry)
		return;

	if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
		mlx5e_ipsec_sadb_rx_del(sa_entry);
}

static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_priv *priv = netdev_priv(x->xso.dev);

	if (!sa_entry)
		return;

	if (sa_entry->hw_context) {
		/* Let any pending ESN update work complete first */
		flush_workqueue(sa_entry->ipsec->wq);
		mlx5e_xfrm_fs_del_rule(priv, sa_entry);
		mlx5_accel_esp_free_hw_context(sa_entry->xfrm->mdev,
					       sa_entry->hw_context);
		mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
	}

	kfree(sa_entry);
}

int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = NULL;

	if (!MLX5_IPSEC_DEV(priv->mdev)) {
		netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
		return 0;
	}

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		return -ENOMEM;

	hash_init(ipsec->sadb_rx);
	spin_lock_init(&ipsec->sadb_rx_lock);
	ida_init(&ipsec->halloc);
	ipsec->en_priv = priv;
	ipsec->en_priv->ipsec = ipsec;
	ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) &
			       MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER);
	ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
					    priv->netdev->name);
	if (!ipsec->wq) {
		kfree(ipsec);
		return -ENOMEM;
	}

	mlx5e_accel_ipsec_fs_init(priv);
	netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
	return 0;
}

void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;

	if (!ipsec)
		return;

	mlx5e_accel_ipsec_fs_cleanup(priv);
	destroy_workqueue(ipsec->wq);

	ida_destroy(&ipsec->halloc);
	kfree(ipsec);
	priv->ipsec = NULL;
}

static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	if (x->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl > 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

struct mlx5e_ipsec_modify_state_work {
	struct work_struct		work;
	struct mlx5_accel_esp_xfrm_attrs attrs;
	struct mlx5e_ipsec_sa_entry	*sa_entry;
};

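/* ESN advance callbacks arrive in atomic context, but updating the
 * device's xfrm context may sleep, so the update is bounced to the
 * ordered ipsec workqueue; mlx5e_xfrm_free_state() flushes that queue
 * before tearing the hardware context down.
 */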
static void _update_xfrm_state(struct work_struct *work)
{
	int ret;
	struct mlx5e_ipsec_modify_state_work *modify_work =
		container_of(work, struct mlx5e_ipsec_modify_state_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = modify_work->sa_entry;

	ret = mlx5_accel_esp_modify_xfrm(sa_entry->xfrm,
					 &modify_work->attrs);
	if (ret)
		netdev_warn(sa_entry->ipsec->en_priv->netdev,
			    "Not an IPSec offload device\n");

	kfree(modify_work);
}

static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec_modify_state_work *modify_work;
	bool need_update;

	if (!sa_entry)
		return;

	need_update = mlx5e_ipsec_update_esn_state(sa_entry);
	if (!need_update)
		return;

	modify_work = kzalloc(sizeof(*modify_work), GFP_ATOMIC);
	if (!modify_work)
		return;

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &modify_work->attrs);
	modify_work->sa_entry = sa_entry;

	INIT_WORK(&modify_work->work, _update_xfrm_state);
	WARN_ON(!queue_work(sa_entry->ipsec->wq, &modify_work->work));
}

static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
	.xdo_dev_state_add	= mlx5e_xfrm_add_state,
	.xdo_dev_state_delete	= mlx5e_xfrm_del_state,
	.xdo_dev_state_free	= mlx5e_xfrm_free_state,
	.xdo_dev_offload_ok	= mlx5e_ipsec_offload_ok,
	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
};

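/* Advertise offload features in steps, each gated on device
 * capabilities: base ESP offload requires the software parser (swp),
 * checksum offload additionally requires swp_csum, and ESP GSO
 * requires the LSO capability plus swp_lso. Bailing out early leaves
 * only the features already proven supported.
 */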
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct net_device *netdev = priv->netdev;

	if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
	    !MLX5_CAP_ETH(mdev, swp)) {
		mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
		return;
	}

	mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");

	netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
	netdev->features |= NETIF_F_HW_ESP;
	netdev->hw_enc_features |= NETIF_F_HW_ESP;

	if (!MLX5_CAP_ETH(mdev, swp_csum)) {
		mlx5_core_dbg(mdev, "mlx5e: SWP checksum not supported\n");
		return;
	}

	netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
	netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;

	if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) ||
	    !MLX5_CAP_ETH(mdev, swp_lso)) {
		mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
		return;
	}

	if (mlx5_is_ipsec_device(mdev))
		netdev->gso_partial_features |= NETIF_F_GSO_ESP;

	mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
	netdev->features |= NETIF_F_GSO_ESP;
	netdev->hw_features |= NETIF_F_GSO_ESP;
	netdev->hw_enc_features |= NETIF_F_GSO_ESP;
}