1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
4 #include <linux/mlx5/device.h>
5 #include <linux/mlx5/mlx5_ifc.h>
6 #include <linux/xarray.h>
10 #include "lib/crypto.h"
11 #include "en_accel/macsec.h"
12 #include "en_accel/macsec_fs.h"
14 #define MLX5_MACSEC_EPN_SCOPE_MID 0x80000000L
15 #define MLX5E_MACSEC_ASO_CTX_SZ MLX5_ST_SZ_BYTES(macsec_aso)
/* Bitmask of ASO events the driver arms in hardware; EPN_ARM requests an
 * extended-packet-number (EPN) rollover notification.
 */
17 enum mlx5_macsec_aso_event_arm {
18 MLX5E_ASO_EPN_ARM = BIT(0),
/* NOTE(review): the constant below appears to belong to a different enum
 * (ASO data offsets) — intervening lines are not visible in this view.
 */
22 MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
/* Per-event handle tying work back to its owning mlx5e_macsec instance. */
25 struct mlx5e_macsec_handle {
26 struct mlx5e_macsec *macsec;
/* Output of an ASO query (fields not visible in this view). */
35 struct mlx5e_macsec_aso_out {
/* Input parameters for an ASO WQE (mode, object id, …). */
40 struct mlx5e_macsec_aso_in {
/* Cached EPN state mirrored into the hardware MACsec object. */
45 struct mlx5e_macsec_epn_state {
/* Deferred-work context for handling MACsec ASO events off the IRQ path. */
51 struct mlx5e_macsec_async_work {
52 struct mlx5e_macsec *macsec;
53 struct mlx5_core_dev *mdev;
54 struct work_struct work;
/* Driver-side security association: HW object id, steering rule, EPN state,
 * and the rhashtable linkage used for the Tx sci -> SA lookup (rhash_sci).
 */
58 struct mlx5e_macsec_sa {
68 struct rhash_head hash;
70 union mlx5e_macsec_rule *macsec_rule;
/* rcu_head: SAs are freed via kfree_rcu_mightsleep (see del_txsa/del_rxsa). */
71 struct rcu_head rcu_head;
72 struct mlx5e_macsec_epn_state epn_state;
75 struct mlx5e_macsec_rx_sc;
/* sc_xarray entry: maps an allocated Rx fs_id back to its rx_sc. */
76 struct mlx5e_macsec_rx_sc_xarray_element {
78 struct mlx5e_macsec_rx_sc *rx_sc;
/* Rx secure channel: up to MACSEC_NUM_AN SAs, its fs_id mapping element,
 * and a metadata dst used to mark decrypted skbs with the SCI.
 */
81 struct mlx5e_macsec_rx_sc {
84 struct mlx5e_macsec_sa *rx_sa[MACSEC_NUM_AN];
85 struct list_head rx_sc_list_element;
86 struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
87 struct metadata_dst *md_dst;
/* rcu_head: rx_sc freed via kfree_rcu_mightsleep in macsec_del_rxsc_ctx. */
88 struct rcu_head rcu_head;
/* DMA-able scratch buffer for ASO reads; 64-byte aligned for the device. */
91 struct mlx5e_macsec_umr {
92 u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
/* ASO (advanced steering operation) context: SQ handle, lock serializing
 * WQE post/poll, and the UMR buffer the device writes query results into.
 */
97 struct mlx5e_macsec_aso {
99 struct mlx5_aso *maso;
100 /* Protects macsec ASO */
101 struct mutex aso_lock;
103 struct mlx5e_macsec_umr *umr;
/* rhashtable layout for the Tx sci -> mlx5e_macsec_sa map (sci_hash):
 * key is the SA's sci field, nodes are linked through sa->hash.
 */
108 static const struct rhashtable_params rhash_sci = {
109 .key_len = sizeof_field(struct mlx5e_macsec_sa, sci),
110 .key_offset = offsetof(struct mlx5e_macsec_sa, sci),
111 .head_offset = offsetof(struct mlx5e_macsec_sa, hash),
112 .automatic_shrinking = true,
/* Per-SecY (per MACsec netdev) context: Tx SAs, Rx SC list, and a copy of
 * the MAC address used to detect set_mac_address vs changelink callbacks.
 */
116 struct mlx5e_macsec_device {
117 const struct net_device *netdev;
118 struct mlx5e_macsec_sa *tx_sa[MACSEC_NUM_AN];
119 struct list_head macsec_rx_sc_list_head;
120 unsigned char *dev_addr;
121 struct list_head macsec_device_list_element;
/* Top-level MACsec offload state for one mlx5 core device: the device list,
 * steering context, Tx sci hash, Rx fs_id xarray, ASO machinery and the
 * workqueue used for async hardware events.
 */
124 struct mlx5e_macsec {
125 struct list_head macsec_device_list_head;
127 struct mlx5e_macsec_fs *macsec_fs;
128 struct mutex lock; /* Protects mlx5e_macsec internal contexts */
130 /* Tx sci -> fs id mapping handling */
131 struct rhashtable sci_hash; /* sci -> mlx5e_macsec_sa */
133 /* Rx fs_id -> rx_sc mapping */
134 struct xarray sc_xarray;
136 struct mlx5_core_dev *mdev;
139 struct mlx5e_macsec_stats stats;
142 struct mlx5e_macsec_aso aso;
144 struct notifier_block nb;
145 struct workqueue_struct *wq;
/* Attributes used to create/modify the hardware MACsec offload object. */
148 struct mlx5_macsec_obj_attrs {
154 struct mlx5e_macsec_epn_state epn_state;
/* Host-order staging of the ASO WQE control segment fields; packed into
 * struct mlx5_wqe_aso_ctrl_seg by macsec_aso_build_wqe_ctrl_seg().
 */
161 struct mlx5_aso_ctrl_param {
163 u8 condition_0_operand;
164 u8 condition_1_operand;
165 u8 condition_0_offset;
166 u8 condition_1_offset;
168 u8 condition_operand;
169 u32 condition_0_data;
170 u32 condition_0_mask;
171 u32 condition_1_data;
172 u32 condition_1_mask;
/* Allocate the ASO UMR scratch buffer, DMA-map it bidirectionally and create
 * an mkey over it so the device can read/write query results.
 * Returns 0 on success; unwinds the DMA mapping on mkey-creation failure.
 */
177 static int mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
179 struct mlx5e_macsec_umr *umr;
180 struct device *dma_device;
184 umr = kzalloc(sizeof(*umr), GFP_KERNEL);
190 dma_device = mlx5_core_dma_dev(mdev);
191 dma_addr = dma_map_single(dma_device, umr->ctx, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
192 err = dma_mapping_error(dma_device, dma_addr);
194 mlx5_core_err(mdev, "Can't map dma device, err=%d\n", err);
198 err = mlx5e_create_mkey(mdev, aso->pdn, &umr->mkey);
200 mlx5_core_err(mdev, "Can't create mkey, err=%d\n", err);
/* Success: remember the mapping for later WQEs / dereg. */
204 umr->dma_addr = dma_addr;
/* Error path: undo the DMA mapping made above. */
211 dma_unmap_single(dma_device, dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
/* Teardown counterpart of mlx5e_macsec_aso_reg_mr: destroy the mkey and
 * unmap the UMR buffer.
 */
217 static void mlx5e_macsec_aso_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
219 struct mlx5e_macsec_umr *umr = aso->umr;
221 mlx5_core_destroy_mkey(mdev, umr->mkey);
222 dma_unmap_single(&mdev->pdev->dev, umr->dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
/* Program replay protection into the ASO context: map the requested replay
 * window to a hardware window-size code and switch the ASO mode to
 * replay-protection. No-op when replay_protect is off; the switch presumably
 * rejects unsupported window sizes (default case not visible here).
 */
226 static int macsec_set_replay_protection(struct mlx5_macsec_obj_attrs *attrs, void *aso_ctx)
230 if (!attrs->replay_protect)
233 switch (attrs->replay_window) {
235 window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_256BIT;
238 window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_128BIT;
241 window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_64BIT;
244 window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_32BIT;
249 MLX5_SET(macsec_aso, aso_ctx, window_size, window_sz);
250 MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_REPLAY_PROTECTION);
/* Create the hardware MACsec offload general object from @attrs and return
 * its id via @macsec_obj_id. Fills the embedded macsec_aso context (next PN,
 * EPN, optional replay protection) before issuing the firmware command.
 */
255 static int mlx5e_macsec_create_object(struct mlx5_core_dev *mdev,
256 struct mlx5_macsec_obj_attrs *attrs,
260 u32 in[MLX5_ST_SZ_DW(create_macsec_obj_in)] = {};
261 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
266 obj = MLX5_ADDR_OF(create_macsec_obj_in, in, macsec_object);
267 aso_ctx = MLX5_ADDR_OF(macsec_offload_obj, obj, macsec_aso);
269 MLX5_SET(macsec_offload_obj, obj, confidentiality_en, attrs->encrypt);
270 MLX5_SET(macsec_offload_obj, obj, dekn, attrs->enc_key_id);
271 MLX5_SET(macsec_offload_obj, obj, aso_return_reg, MLX5_MACSEC_ASO_REG_C_4_5);
272 MLX5_SET(macsec_offload_obj, obj, macsec_aso_access_pd, attrs->aso_pdn);
273 MLX5_SET(macsec_aso, aso_ctx, mode_parameter, attrs->next_pn);
/* EPN mode: arm the EPN event, program MSB/overlap, and use the SSCI as
 * the object's sci; the 12-byte salt is copied 4 bytes at a time in
 * reversed word order (hardware layout).
 */
276 if (attrs->epn_state.epn_enabled) {
280 MLX5_SET(macsec_aso, aso_ctx, epn_event_arm, 1);
281 MLX5_SET(macsec_offload_obj, obj, epn_en, 1);
282 MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
283 MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
284 MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)attrs->ssci);
285 salt_p = MLX5_ADDR_OF(macsec_offload_obj, obj, salt);
286 for (i = 0; i < 3 ; i++)
287 memcpy((u32 *)salt_p + i, &attrs->salt.bytes[4 * (2 - i)], 4);
/* Non-EPN: plain 64-bit SCI. */
289 MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)(attrs->sci));
292 MLX5_SET(macsec_aso, aso_ctx, valid, 0x1);
/* Default ASO mode increments the packet number per packet. */
294 MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_INC_SN);
296 err = macsec_set_replay_protection(attrs, aso_ctx);
301 /* general object fields set */
302 MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
303 MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
305 err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
308 "MACsec offload: Failed to create MACsec object (err = %d)\n",
313 *macsec_obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
/* Destroy a hardware MACsec general object by id. Best-effort: the command
 * status is not checked (nothing useful to do on teardown failure).
 */
318 static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_obj_id)
320 u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
321 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
323 MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
324 MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
325 MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_obj_id);
327 mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
/* Undo an active SA: for Tx, first unlink it from the sci hash so datapath
 * lookups stop seeing it; then remove the steering rule and destroy the
 * hardware object. Safe to call on an SA with no rule (early return).
 */
330 static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
331 struct mlx5e_macsec_sa *sa,
334 int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
335 MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
337 if ((is_tx) && sa->fs_id) {
338 /* Make sure ongoing datapath readers sees a valid SA */
339 rhashtable_remove_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
343 if (!sa->macsec_rule)
346 mlx5e_macsec_fs_del_rule(macsec->macsec_fs, sa->macsec_rule, action);
347 mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
/* Clear the rule pointer so a second cleanup is a no-op. */
348 sa->macsec_rule = NULL;
/* Activate an SA in hardware: create the MACsec object, install the Tx or
 * Rx steering rule, and (for Tx) publish the SA in the sci hash.
 * Returns 0 on success, negative errno on failure with created state undone.
 */
351 static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
352 struct mlx5e_macsec_sa *sa,
356 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
357 struct mlx5e_macsec *macsec = priv->macsec;
358 struct mlx5_macsec_rule_attrs rule_attrs;
359 struct mlx5_core_dev *mdev = priv->mdev;
360 struct mlx5_macsec_obj_attrs obj_attrs;
361 union mlx5e_macsec_rule *macsec_rule;
364 obj_attrs.next_pn = sa->next_pn;
365 obj_attrs.sci = cpu_to_be64((__force u64)sa->sci);
366 obj_attrs.enc_key_id = sa->enc_key_id;
367 obj_attrs.encrypt = encrypt;
368 obj_attrs.aso_pdn = macsec->aso.pdn;
369 obj_attrs.epn_state = sa->epn_state;
371 if (sa->epn_state.epn_enabled) {
372 obj_attrs.ssci = cpu_to_be32((__force u32)sa->ssci);
373 memcpy(&obj_attrs.salt, &sa->salt, sizeof(sa->salt));
376 obj_attrs.replay_window = ctx->secy->replay_window;
377 obj_attrs.replay_protect = ctx->secy->replay_protect;
379 err = mlx5e_macsec_create_object(mdev, &obj_attrs, is_tx, &sa->macsec_obj_id);
383 rule_attrs.macsec_obj_id = sa->macsec_obj_id;
384 rule_attrs.sci = sa->sci;
385 rule_attrs.assoc_num = sa->assoc_num;
386 rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
387 MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
389 macsec_rule = mlx5e_macsec_fs_add_rule(macsec->macsec_fs, ctx, &rule_attrs, &sa->fs_id);
392 goto destroy_macsec_object;
395 sa->macsec_rule = macsec_rule;
398 err = rhashtable_insert_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
400 goto destroy_macsec_object_and_rule;
/* NOTE(review): mlx5e_macsec_cleanup_sa() already destroys the MACsec
 * object, yet control falls through to destroy it a second time below —
 * looks like a double destroy on the rhashtable failure path; verify
 * against the full source.
 */
405 destroy_macsec_object_and_rule:
406 mlx5e_macsec_cleanup_sa(macsec, sa, is_tx);
407 destroy_macsec_object:
408 mlx5e_macsec_destroy_object(mdev, sa->macsec_obj_id);
/* Find an Rx SC by SCI in a device's RCU-protected SC list; NULL-equivalent
 * result on miss (return of the not-found case is outside this view).
 */
413 static struct mlx5e_macsec_rx_sc *
414 mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci)
416 struct mlx5e_macsec_rx_sc *iter;
418 list_for_each_entry_rcu(iter, list, rx_sc_list_element) {
419 if (iter->sci == sci)
/* Toggle an Rx SA's active state, tearing down or re-creating its hardware
 * context to match. On (re)activation failure the SA is left inactive.
 */
426 static int macsec_rx_sa_active_update(struct macsec_context *ctx,
427 struct mlx5e_macsec_sa *rx_sa,
430 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
431 struct mlx5e_macsec *macsec = priv->macsec;
434 if (rx_sa->active == active)
437 rx_sa->active = active;
439 mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
/* Rx activation: encrypt=true, is_tx=false (see the TODO in add_rxsa
 * about supporting authentication-only flows).
 */
443 err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
445 rx_sa->active = false;
/* Gate offload on the SecY configurations the hardware path supports:
 * strict validation, default ICV length, protect_frames on, encrypt on.
 * Returns false (with a netdev error) when any requirement is unmet.
 */
450 static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
452 const struct net_device *netdev = ctx->netdev;
453 const struct macsec_secy *secy = ctx->secy;
455 if (secy->validate_frames != MACSEC_VALIDATE_STRICT) {
457 "MACsec offload is supported only when validate_frame is in strict mode\n");
461 if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) {
462 netdev_err(netdev, "MACsec offload is supported only when icv_len is %d\n",
463 MACSEC_DEFAULT_ICV_LEN);
467 if (!secy->protect_frames) {
469 "MACsec offload is supported only when protect_frames is set\n");
473 if (!ctx->secy->tx_sc.encrypt) {
474 netdev_err(netdev, "MACsec offload: encrypt off isn't supported\n");
/* Look up the per-SecY device context whose netdev matches the context's
 * SecY netdev; traversal uses the RCU list iterator (callers hold
 * macsec->lock for writes).
 */
481 static struct mlx5e_macsec_device *
482 mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec,
483 const struct macsec_context *ctx)
485 struct mlx5e_macsec_device *iter;
486 const struct list_head *list;
488 list = &macsec->macsec_device_list_head;
489 list_for_each_entry_rcu(iter, list, macsec_device_list_element) {
490 if (iter->netdev == ctx->secy->netdev)
/* Seed the SA's EPN state from the stack: copy the key salt, record the
 * upper PN half, and set the overlap bit when the lower half has crossed
 * the mid-scope threshold (MLX5_MACSEC_EPN_SCOPE_MID).
 */
497 static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key,
498 const pn_t *next_pn_halves, ssci_t ssci)
500 struct mlx5e_macsec_epn_state *epn_state = &sa->epn_state;
503 sa->salt = key->salt;
504 epn_state->epn_enabled = 1;
505 epn_state->epn_msb = next_pn_halves->upper;
506 epn_state->overlap = next_pn_halves->lower < MLX5_MACSEC_EPN_SCOPE_MID ? 0 : 1;
/* macsec_ops .mdo_add_txsa: allocate a driver SA for @assoc_num, create its
 * encryption key, and — only when the SecY is operational and this is the
 * active encoding SA — bring it up in hardware via mlx5e_macsec_init_sa.
 * Serialized by macsec->lock.
 */
509 static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
511 const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
512 const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
513 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
514 const struct macsec_secy *secy = ctx->secy;
515 struct mlx5e_macsec_device *macsec_device;
516 struct mlx5_core_dev *mdev = priv->mdev;
517 u8 assoc_num = ctx->sa.assoc_num;
518 struct mlx5e_macsec_sa *tx_sa;
519 struct mlx5e_macsec *macsec;
522 mutex_lock(&priv->macsec->lock);
524 macsec = priv->macsec;
525 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
526 if (!macsec_device) {
527 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
/* Reject duplicate association numbers. */
532 if (macsec_device->tx_sa[assoc_num]) {
533 netdev_err(ctx->netdev, "MACsec offload tx_sa: %d already exist\n", assoc_num);
538 tx_sa = kzalloc(sizeof(*tx_sa), GFP_KERNEL);
544 tx_sa->active = ctx_tx_sa->active;
545 tx_sa->next_pn = ctx_tx_sa->next_pn_halves.lower;
546 tx_sa->sci = secy->sci;
547 tx_sa->assoc_num = assoc_num;
550 update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves,
553 err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len,
554 MLX5_ACCEL_OBJ_MACSEC_KEY,
559 macsec_device->tx_sa[assoc_num] = tx_sa;
/* Defer hardware programming unless this SA is the active encoding SA
 * of an operational SecY.
 */
560 if (!secy->operational ||
561 assoc_num != tx_sc->encoding_sa ||
565 err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
567 goto destroy_encryption_key;
569 mutex_unlock(&macsec->lock);
573 destroy_encryption_key:
574 macsec_device->tx_sa[assoc_num] = NULL;
575 mlx5_destroy_encryption_key(mdev, tx_sa->enc_key_id);
579 mutex_unlock(&macsec->lock);
/* macsec_ops .mdo_upd_txsa: toggle a Tx SA's active state in hardware.
 * PN updates are rejected, and hardware is only touched when this SA is
 * the current encoding SA. Serialized by macsec->lock.
 */
584 static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
586 const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
587 const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
588 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
589 struct mlx5e_macsec_device *macsec_device;
590 u8 assoc_num = ctx->sa.assoc_num;
591 struct mlx5e_macsec_sa *tx_sa;
592 struct mlx5e_macsec *macsec;
593 struct net_device *netdev;
596 mutex_lock(&priv->macsec->lock);
598 macsec = priv->macsec;
599 netdev = ctx->netdev;
600 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
601 if (!macsec_device) {
602 netdev_err(netdev, "MACsec offload: Failed to find device context\n");
607 tx_sa = macsec_device->tx_sa[assoc_num];
609 netdev_err(netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
/* Changing the packet number of an existing SA is not supported. */
614 if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) {
615 netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n",
621 if (tx_sa->active == ctx_tx_sa->active)
624 tx_sa->active = ctx_tx_sa->active;
/* Not the encoding SA: nothing to (de)program in hardware. */
625 if (tx_sa->assoc_num != tx_sc->encoding_sa)
628 if (ctx_tx_sa->active) {
629 err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
/* Deactivation: only meaningful if a rule was installed. */
633 if (!tx_sa->macsec_rule) {
638 mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
641 mutex_unlock(&macsec->lock);
/* macsec_ops .mdo_del_txsa: tear down the SA's hardware state and key,
 * then free the SA after an RCU grace period (datapath may still hold a
 * reference obtained from the sci hash). Serialized by macsec->lock.
 */
646 static int mlx5e_macsec_del_txsa(struct macsec_context *ctx)
648 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
649 struct mlx5e_macsec_device *macsec_device;
650 u8 assoc_num = ctx->sa.assoc_num;
651 struct mlx5e_macsec_sa *tx_sa;
652 struct mlx5e_macsec *macsec;
655 mutex_lock(&priv->macsec->lock);
656 macsec = priv->macsec;
657 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
658 if (!macsec_device) {
659 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
664 tx_sa = macsec_device->tx_sa[assoc_num];
666 netdev_err(ctx->netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
671 mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
672 mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
673 kfree_rcu_mightsleep(tx_sa);
674 macsec_device->tx_sa[assoc_num] = NULL;
677 mutex_unlock(&macsec->lock);
/* Datapath helper: resolve a Tx SCI to its steering fs_id via the sci hash.
 * (RCU read protection and the miss path are outside this view.)
 */
682 static u32 mlx5e_macsec_get_sa_from_hashtable(struct rhashtable *sci_hash, sci_t *sci)
684 struct mlx5e_macsec_sa *macsec_sa;
688 macsec_sa = rhashtable_lookup(sci_hash, sci, rhash_sci);
690 fs_id = macsec_sa->fs_id;
/* macsec_ops .mdo_add_rxsc: create a driver Rx SC — allocate an fs_id from
 * sc_xarray (bounded by MLX5_MACEC_RX_FS_ID_MAX), a MACsec metadata dst for
 * marking decrypted skbs, and publish the SC on the device's RCU list.
 * Serialized by macsec->lock.
 */
696 static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)
698 struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
699 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
700 const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
701 struct mlx5e_macsec_device *macsec_device;
702 struct mlx5e_macsec_rx_sc *rx_sc;
703 struct list_head *rx_sc_list;
704 struct mlx5e_macsec *macsec;
707 mutex_lock(&priv->macsec->lock);
708 macsec = priv->macsec;
709 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
710 if (!macsec_device) {
711 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
716 rx_sc_list = &macsec_device->macsec_rx_sc_list_head;
717 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(rx_sc_list, ctx_rx_sc->sci);
719 netdev_err(ctx->netdev, "MACsec offload: rx_sc (sci %lld) already exists\n",
725 rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
731 sc_xarray_element = kzalloc(sizeof(*sc_xarray_element), GFP_KERNEL);
732 if (!sc_xarray_element) {
737 sc_xarray_element->rx_sc = rx_sc;
/* fs_id 0 is reserved (XA_LIMIT starts at 1) — datapath treats 0 as
 * "no SC".
 */
738 err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element,
739 XA_LIMIT(1, MLX5_MACEC_RX_FS_ID_MAX), GFP_KERNEL);
742 netdev_err(ctx->netdev,
743 "MACsec offload: unable to create entry for RX SC (%d Rx SCs already allocated)\n",
744 MLX5_MACEC_RX_FS_ID_MAX);
745 goto destroy_sc_xarray_elemenet;
748 rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
749 if (!rx_sc->md_dst) {
754 rx_sc->sci = ctx_rx_sc->sci;
755 rx_sc->active = ctx_rx_sc->active;
756 list_add_rcu(&rx_sc->rx_sc_list_element, rx_sc_list);
758 rx_sc->sc_xarray_element = sc_xarray_element;
759 rx_sc->md_dst->u.macsec_info.sci = rx_sc->sci;
760 mutex_unlock(&macsec->lock);
/* Error unwind: release the fs_id, then the xarray element. */
765 xa_erase(&macsec->sc_xarray, sc_xarray_element->fs_id);
766 destroy_sc_xarray_elemenet:
767 kfree(sc_xarray_element);
772 mutex_unlock(&macsec->lock);
/* macsec_ops .mdo_upd_rxsc: propagate an SC-level active toggle to each of
 * its SAs — an SA is effectively active only when both it and its SC are.
 * Serialized by macsec->lock.
 */
777 static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
779 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
780 const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
781 struct mlx5e_macsec_device *macsec_device;
782 struct mlx5e_macsec_rx_sc *rx_sc;
783 struct mlx5e_macsec_sa *rx_sa;
784 struct mlx5e_macsec *macsec;
785 struct list_head *list;
789 mutex_lock(&priv->macsec->lock);
791 macsec = priv->macsec;
792 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
793 if (!macsec_device) {
794 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
799 list = &macsec_device->macsec_rx_sc_list_head;
800 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx_rx_sc->sci);
806 if (rx_sc->active == ctx_rx_sc->active)
809 rx_sc->active = ctx_rx_sc->active;
810 for (i = 0; i < MACSEC_NUM_AN; ++i) {
811 rx_sa = rx_sc->rx_sa[i];
815 err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active);
821 mutex_unlock(&macsec->lock);
/* Destroy an Rx SC and everything hanging off it: each SA's hardware state
 * and key, then the SC's list/xarray presence, metadata dst, and finally
 * the SC itself after an RCU grace period.
 */
826 static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc)
828 struct mlx5e_macsec_sa *rx_sa;
831 for (i = 0; i < MACSEC_NUM_AN; ++i) {
832 rx_sa = rx_sc->rx_sa[i];
836 mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
837 mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
840 rx_sc->rx_sa[i] = NULL;
843 /* At this point the relevant MACsec offload Rx rule already removed at
844 * mlx5e_macsec_cleanup_sa need to wait for datapath to finish current
845 * Rx related data propagating using xa_erase which uses rcu to sync,
846 * once fs_id is erased then this rx_sc is hidden from datapath.
848 list_del_rcu(&rx_sc->rx_sc_list_element);
849 xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id);
850 metadata_dst_free(rx_sc->md_dst);
851 kfree(rx_sc->sc_xarray_element);
852 kfree_rcu_mightsleep(rx_sc);
/* macsec_ops .mdo_del_rxsc: look up the SC by ctx->rx_sc->sci and tear it
 * down via macsec_del_rxsc_ctx. Serialized by macsec->lock.
 */
855 static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
857 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
858 struct mlx5e_macsec_device *macsec_device;
859 struct mlx5e_macsec_rx_sc *rx_sc;
860 struct mlx5e_macsec *macsec;
861 struct list_head *list;
864 mutex_lock(&priv->macsec->lock);
866 macsec = priv->macsec;
867 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
868 if (!macsec_device) {
869 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
874 list = &macsec_device->macsec_rx_sc_list_head;
875 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx->rx_sc->sci);
877 netdev_err(ctx->netdev,
878 "MACsec offload rx_sc sci %lld doesn't exist\n",
/* NOTE(review): this is an rx_sc op — printing ctx->sa.rx_sa->sc->sci
 * here may dereference an unset ctx->sa pointer; ctx->rx_sc->sci (used
 * for the lookup above) looks like the intended value. Verify upstream.
 */
879 ctx->sa.rx_sa->sc->sci);
884 macsec_del_rxsc_ctx(macsec, rx_sc);
886 mutex_unlock(&macsec->lock);
/* macsec_ops .mdo_add_rxsa: allocate an Rx SA under its SC, inherit the SC's
 * fs_id, create the encryption key, and program hardware when active.
 * Serialized by macsec->lock.
 */
891 static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
893 const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
894 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
895 struct mlx5e_macsec_device *macsec_device;
896 struct mlx5_core_dev *mdev = priv->mdev;
897 u8 assoc_num = ctx->sa.assoc_num;
898 struct mlx5e_macsec_rx_sc *rx_sc;
899 sci_t sci = ctx_rx_sa->sc->sci;
900 struct mlx5e_macsec_sa *rx_sa;
901 struct mlx5e_macsec *macsec;
902 struct list_head *list;
905 mutex_lock(&priv->macsec->lock);
907 macsec = priv->macsec;
908 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
909 if (!macsec_device) {
910 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
915 list = &macsec_device->macsec_rx_sc_list_head;
916 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
918 netdev_err(ctx->netdev,
919 "MACsec offload rx_sc sci %lld doesn't exist\n",
920 ctx->sa.rx_sa->sc->sci);
925 if (rx_sc->rx_sa[assoc_num]) {
926 netdev_err(ctx->netdev,
927 "MACsec offload rx_sc sci %lld rx_sa %d already exist\n",
933 rx_sa = kzalloc(sizeof(*rx_sa), GFP_KERNEL);
939 rx_sa->active = ctx_rx_sa->active;
940 rx_sa->next_pn = ctx_rx_sa->next_pn;
942 rx_sa->assoc_num = assoc_num;
/* Rx SAs share their SC's fs_id for steering/metadata matching. */
943 rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id;
946 update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves,
949 err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len,
950 MLX5_ACCEL_OBJ_MACSEC_KEY,
955 rx_sc->rx_sa[assoc_num] = rx_sa;
959 //TODO - add support for both authentication and encryption flows
960 err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
962 goto destroy_encryption_key;
966 destroy_encryption_key:
967 rx_sc->rx_sa[assoc_num] = NULL;
968 mlx5_destroy_encryption_key(mdev, rx_sa->enc_key_id);
972 mutex_unlock(&macsec->lock);
/* macsec_ops .mdo_upd_rxsa: update an Rx SA's active state; PN changes are
 * rejected like on the Tx side. Serialized by macsec->lock.
 */
977 static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
979 const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
980 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
981 struct mlx5e_macsec_device *macsec_device;
982 u8 assoc_num = ctx->sa.assoc_num;
983 struct mlx5e_macsec_rx_sc *rx_sc;
984 sci_t sci = ctx_rx_sa->sc->sci;
985 struct mlx5e_macsec_sa *rx_sa;
986 struct mlx5e_macsec *macsec;
987 struct list_head *list;
990 mutex_lock(&priv->macsec->lock);
992 macsec = priv->macsec;
993 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
994 if (!macsec_device) {
995 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
1000 list = &macsec_device->macsec_rx_sc_list_head;
1001 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
1003 netdev_err(ctx->netdev,
1004 "MACsec offload rx_sc sci %lld doesn't exist\n",
1005 ctx->sa.rx_sa->sc->sci);
1010 rx_sa = rx_sc->rx_sa[assoc_num];
1012 netdev_err(ctx->netdev,
1013 "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
/* Changing the packet number of an existing SA is not supported. */
1019 if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) {
1020 netdev_err(ctx->netdev,
1021 "MACsec offload update RX sa %d PN isn't supported\n",
1027 err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active);
1029 mutex_unlock(&macsec->lock);
/* macsec_ops .mdo_del_rxsa: tear down one Rx SA (hardware state + key) and
 * clear its slot in the SC. Serialized by macsec->lock.
 */
1034 static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
1036 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
1037 struct mlx5e_macsec_device *macsec_device;
1038 sci_t sci = ctx->sa.rx_sa->sc->sci;
1039 struct mlx5e_macsec_rx_sc *rx_sc;
1040 u8 assoc_num = ctx->sa.assoc_num;
1041 struct mlx5e_macsec_sa *rx_sa;
1042 struct mlx5e_macsec *macsec;
1043 struct list_head *list;
1046 mutex_lock(&priv->macsec->lock);
1048 macsec = priv->macsec;
1049 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
1050 if (!macsec_device) {
1051 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
1056 list = &macsec_device->macsec_rx_sc_list_head;
1057 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
1059 netdev_err(ctx->netdev,
1060 "MACsec offload rx_sc sci %lld doesn't exist\n",
1061 ctx->sa.rx_sa->sc->sci);
1066 rx_sa = rx_sc->rx_sa[assoc_num];
1068 netdev_err(ctx->netdev,
1069 "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
1075 mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
1076 mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
1078 rx_sc->rx_sa[assoc_num] = NULL;
1081 mutex_unlock(&macsec->lock);
/* macsec_ops .mdo_add_secy: register a new offloaded SecY — validate its
 * features, enforce the device limit, snapshot the MAC address (used later
 * to distinguish set_mac_address from changelink), and publish the context
 * on the RCU device list. Serialized by macsec->lock.
 */
1086 static int mlx5e_macsec_add_secy(struct macsec_context *ctx)
1088 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
1089 const struct net_device *dev = ctx->secy->netdev;
1090 const struct net_device *netdev = ctx->netdev;
1091 struct mlx5e_macsec_device *macsec_device;
1092 struct mlx5e_macsec *macsec;
1095 if (!mlx5e_macsec_secy_features_validate(ctx))
1098 mutex_lock(&priv->macsec->lock);
1099 macsec = priv->macsec;
1100 if (mlx5e_macsec_get_macsec_device_context(macsec, ctx)) {
1101 netdev_err(netdev, "MACsec offload: MACsec net_device already exist\n");
1105 if (macsec->num_of_devices >= MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES) {
1106 netdev_err(netdev, "Currently, only %d MACsec offload devices can be set\n",
1107 MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES);
1112 macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL);
1113 if (!macsec_device) {
1118 macsec_device->dev_addr = kmemdup(dev->dev_addr, dev->addr_len, GFP_KERNEL);
1119 if (!macsec_device->dev_addr) {
1120 kfree(macsec_device);
1125 macsec_device->netdev = dev;
1127 INIT_LIST_HEAD_RCU(&macsec_device->macsec_rx_sc_list_head);
1128 list_add_rcu(&macsec_device->macsec_device_list_element, &macsec->macsec_device_list_head);
1130 ++macsec->num_of_devices;
1132 mutex_unlock(&macsec->lock);
/* Handle a MAC address change on an offloaded SecY: drop every installed Rx
 * rule, then re-create rules for the still-active SAs (two passes), and
 * finally record the new address in the device context.
 */
1137 static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
1138 struct mlx5e_macsec_device *macsec_device)
1140 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
1141 const struct net_device *dev = ctx->secy->netdev;
1142 struct mlx5e_macsec *macsec = priv->macsec;
1143 struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
1144 struct mlx5e_macsec_sa *rx_sa;
1145 struct list_head *list;
/* Pass 1: tear down all currently-installed Rx rules. */
1149 list = &macsec_device->macsec_rx_sc_list_head;
1150 list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
1151 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1152 rx_sa = rx_sc->rx_sa[i];
1153 if (!rx_sa || !rx_sa->macsec_rule)
1156 mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
/* Pass 2: re-install rules for SAs that remain active. */
1160 list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
1161 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1162 rx_sa = rx_sc->rx_sa[i];
1166 if (rx_sa->active) {
1167 err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
1174 memcpy(macsec_device->dev_addr, dev->dev_addr, dev->addr_len);
1179 /* this function is called from 2 macsec ops functions:
1180 * macsec_set_mac_address – MAC address was changed, therefore we need to destroy
1181 * and create new Tx contexts(macsec object + steering).
1182 * macsec_changelink – in this case the tx SC or SecY may be changed, therefore need to
1183 * destroy Tx and Rx contexts(macsec object + steering)
1185 static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
1187 const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
1188 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
1189 const struct net_device *dev = ctx->secy->netdev;
1190 struct mlx5e_macsec_device *macsec_device;
1191 struct mlx5e_macsec_sa *tx_sa;
1192 struct mlx5e_macsec *macsec;
1195 if (!mlx5e_macsec_secy_features_validate(ctx))
1198 mutex_lock(&priv->macsec->lock);
1200 macsec = priv->macsec;
1201 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
1202 if (!macsec_device) {
1203 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
1208 /* if the dev_addr hasn't change, it mean the callback is from macsec_changelink */
/* NOTE(review): the memcmp condition reads as "addresses are equal ->
 * refresh hw address", which contradicts the comment above; the
 * intervening lines are not visible here — verify the branch sense
 * against the full source.
 */
1209 if (!memcmp(macsec_device->dev_addr, dev->dev_addr, dev->addr_len)) {
1210 err = macsec_upd_secy_hw_address(ctx, macsec_device);
/* Tear down all Tx SAs' hardware state... */
1215 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1216 tx_sa = macsec_device->tx_sa[i];
1220 mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
/* ...then re-create only the active encoding SA. */
1223 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1224 tx_sa = macsec_device->tx_sa[i];
1228 if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
1229 err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
1236 mutex_unlock(&macsec->lock);
/* macsec_ops .mdo_del_secy: full teardown of one offloaded SecY — every Tx
 * SA (hardware state, key, RCU-deferred free), every Rx SC, the saved MAC
 * address copy, and finally the device context itself. Serialized by
 * macsec->lock.
 */
1241 static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
1243 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
1244 struct mlx5e_macsec_device *macsec_device;
1245 struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
1246 struct mlx5e_macsec_sa *tx_sa;
1247 struct mlx5e_macsec *macsec;
1248 struct list_head *list;
1252 mutex_lock(&priv->macsec->lock);
1253 macsec = priv->macsec;
1254 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
1255 if (!macsec_device) {
1256 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
1262 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1263 tx_sa = macsec_device->tx_sa[i];
1267 mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
1268 mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
1270 macsec_device->tx_sa[i] = NULL;
1273 list = &macsec_device->macsec_rx_sc_list_head;
1274 list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element)
1275 macsec_del_rxsc_ctx(macsec, rx_sc);
1277 kfree(macsec_device->dev_addr);
1278 macsec_device->dev_addr = NULL;
1280 list_del_rcu(&macsec_device->macsec_device_list_element);
1281 --macsec->num_of_devices;
1282 kfree(macsec_device);
1285 mutex_unlock(&macsec->lock);
/* Copy the SA's cached EPN fields into the object attributes used for a
 * hardware modify command.
 */
1290 static void macsec_build_accel_attrs(struct mlx5e_macsec_sa *sa,
1291 struct mlx5_macsec_obj_attrs *attrs)
1293 attrs->epn_state.epn_msb = sa->epn_state.epn_msb;
1294 attrs->epn_state.overlap = sa->epn_state.overlap;
/* Fill an ASO WQE control segment: point it at the UMR buffer (read-enabled)
 * via its mkey, then pack the host-order fields from @param into the
 * segment's bit-packed layout. A NULL @param leaves only the address/mkey
 * set (plain query); the guard branch is outside this view.
 */
1297 static void macsec_aso_build_wqe_ctrl_seg(struct mlx5e_macsec_aso *macsec_aso,
1298 struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
1299 struct mlx5_aso_ctrl_param *param)
1301 struct mlx5e_macsec_umr *umr = macsec_aso->umr;
1303 memset(aso_ctrl, 0, sizeof(*aso_ctrl));
/* Low address dword carries the READ_EN flag in its low bits (the buffer
 * is 64-byte aligned, so those bits are free).
 */
1304 aso_ctrl->va_l = cpu_to_be32(umr->dma_addr | ASO_CTRL_READ_EN);
1305 aso_ctrl->va_h = cpu_to_be32((u64)umr->dma_addr >> 32);
1306 aso_ctrl->l_key = cpu_to_be32(umr->mkey);
/* Pack two 4-bit/2-bit fields per byte as the hardware layout requires. */
1311 aso_ctrl->data_mask_mode = param->data_mask_mode << 6;
1312 aso_ctrl->condition_1_0_operand = param->condition_1_operand |
1313 param->condition_0_operand << 4;
1314 aso_ctrl->condition_1_0_offset = param->condition_1_offset |
1315 param->condition_0_offset << 4;
1316 aso_ctrl->data_offset_condition_operand = param->data_offset |
1317 param->condition_operand << 6;
1318 aso_ctrl->condition_0_data = cpu_to_be32(param->condition_0_data);
1319 aso_ctrl->condition_0_mask = cpu_to_be32(param->condition_0_mask);
1320 aso_ctrl->condition_1_data = cpu_to_be32(param->condition_1_data);
1321 aso_ctrl->condition_1_mask = cpu_to_be32(param->condition_1_mask);
1322 aso_ctrl->bitwise_data = cpu_to_be64(param->bitwise_data);
1323 aso_ctrl->data_mask = cpu_to_be64(param->data_mask);
/* Update the EPN MSB/overlap fields of an existing hardware MACsec object:
 * first query the object to confirm those fields are modifiable
 * (modify_field_select bitmask), then issue the modify command.
 */
1326 static int mlx5e_macsec_modify_obj(struct mlx5_core_dev *mdev, struct mlx5_macsec_obj_attrs *attrs,
1329 u32 in[MLX5_ST_SZ_DW(modify_macsec_obj_in)] = {};
1330 u32 out[MLX5_ST_SZ_DW(query_macsec_obj_out)];
1331 u64 modify_field_select = 0;
1335 /* General object fields set */
1336 MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
1337 MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
1338 MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_id);
1339 err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
1341 mlx5_core_err(mdev, "Query MACsec object failed (Object id %d), err = %d\n",
1346 obj = MLX5_ADDR_OF(query_macsec_obj_out, out, macsec_object);
1347 modify_field_select = MLX5_GET64(macsec_offload_obj, obj, modify_field_select);
/* Bail out quietly if firmware does not allow modifying either field. */
1350 if (!(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP) ||
1351 !(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB)) {
1352 mlx5_core_dbg(mdev, "MACsec object field is not modifiable (Object id %d)\n",
/* Reuse `in` (header already carries obj_type/obj_id) for the modify. */
1357 obj = MLX5_ADDR_OF(modify_macsec_obj_in, in, macsec_object);
1358 MLX5_SET64(macsec_offload_obj, obj, modify_field_select,
1359 MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP | MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB);
1360 MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
1361 MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
1363 /* General object fields set */
1364 MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
1366 return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
1369 static void macsec_aso_build_ctrl(struct mlx5e_macsec_aso *aso,
1370 struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
1371 struct mlx5e_macsec_aso_in *in)
1373 struct mlx5_aso_ctrl_param param = {};
1375 param.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT;
1376 param.condition_0_operand = MLX5_ASO_ALWAYS_TRUE;
1377 param.condition_1_operand = MLX5_ASO_ALWAYS_TRUE;
1378 if (in->mode == MLX5_MACSEC_EPN) {
1379 param.data_offset = MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
1380 param.bitwise_data = BIT_ULL(54);
1381 param.data_mask = param.bitwise_data;
1383 macsec_aso_build_wqe_ctrl_seg(aso, aso_ctrl, ¶m);
/* Post a single ASO WQE that (re-)arms the EPN event for the given MACsec
 * object, then synchronously poll its completion.  Serialized by aso_lock
 * since the ASO SQ is shared.
 */
1386 static int macsec_aso_set_arm_event(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
1387 struct mlx5e_macsec_aso_in *in)
1389 struct mlx5e_macsec_aso *aso;
1390 struct mlx5_aso_wqe *aso_wqe;
1391 struct mlx5_aso *maso;
1397 mutex_lock(&aso->aso_lock);
1398 aso_wqe = mlx5_aso_get_wqe(maso);
1399 mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
1400 MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
/* EPN-specific ctrl segment: bitwise-64 write of the arm bit. */
1401 macsec_aso_build_ctrl(aso, &aso_wqe->aso_ctrl, in);
1402 mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
/* Synchronous: busy-poll the CQ for the WQE just posted. */
1403 err = mlx5_aso_poll_cq(maso, false);
1404 mutex_unlock(&aso->aso_lock);
/* Synchronously query the MACsec ASO context of an object: post a WQE with
 * a NULL ctrl param (plain read), poll its completion for up to ~10ms, then
 * decode the DMA'd context in aso->umr->ctx into @out (EPN arm flag and the
 * current mode parameter, i.e. the bottom of the replay window).
 */
1409 static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
1410 struct mlx5e_macsec_aso_in *in, struct mlx5e_macsec_aso_out *out)
1412 struct mlx5e_macsec_aso *aso;
1413 struct mlx5_aso_wqe *aso_wqe;
1414 struct mlx5_aso *maso;
1415 unsigned long expires;
1421 mutex_lock(&aso->aso_lock);
1423 aso_wqe = mlx5_aso_get_wqe(maso);
1424 mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
1425 MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
/* NULL param => default ctrl segment; this is a pure context read. */
1426 macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);
1428 mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
/* Bounded busy-wait (~10ms) for the completion before giving up. */
1429 expires = jiffies + msecs_to_jiffies(10);
1431 err = mlx5_aso_poll_cq(maso, false);
1433 usleep_range(2, 10);
1434 } while (err && time_is_after_jiffies(expires));
/* Firmware wrote the ASO context into the UMR-registered buffer. */
1439 if (MLX5_GET(macsec_aso, aso->umr->ctx, epn_event_arm))
1440 out->event_arm |= MLX5E_ASO_EPN_ARM;
1442 out->mode_param = MLX5_GET(macsec_aso, aso->umr->ctx, mode_parameter);
1445 mutex_unlock(&aso->aso_lock);
/* Linear scan over every MACsec device's TX SAs for an *active* SA whose
 * hardware object id matches @obj_id.  Caller must hold macsec->lock
 * (the device list is walked unlocked here).  Presumably returns NULL when
 * no match is found — the return statements are elided in this listing.
 */
1449 static struct mlx5e_macsec_sa *get_macsec_tx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
1452 const struct list_head *device_list;
1453 struct mlx5e_macsec_sa *macsec_sa;
1454 struct mlx5e_macsec_device *iter;
1457 device_list = &macsec->macsec_device_list_head;
1459 list_for_each_entry(iter, device_list, macsec_device_list_element) {
1460 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1461 macsec_sa = iter->tx_sa[i];
/* Skip empty slots and SAs that are not currently active. */
1462 if (!macsec_sa || !macsec_sa->active)
1464 if (macsec_sa->macsec_obj_id == obj_id)
/* RX-side counterpart of get_macsec_tx_sa_from_obj_id(): walk every
 * device's RX SC list, and each SC's SAs, looking for an active SA whose
 * hardware object id matches @obj_id.  Caller must hold macsec->lock.
 */
1472 static struct mlx5e_macsec_sa *get_macsec_rx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
1475 const struct list_head *device_list, *sc_list;
1476 struct mlx5e_macsec_rx_sc *mlx5e_rx_sc;
1477 struct mlx5e_macsec_sa *macsec_sa;
1478 struct mlx5e_macsec_device *iter;
1481 device_list = &macsec->macsec_device_list_head;
1483 list_for_each_entry(iter, device_list, macsec_device_list_element) {
1484 sc_list = &iter->macsec_rx_sc_list_head;
1485 list_for_each_entry(mlx5e_rx_sc, sc_list, rx_sc_list_element) {
1486 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1487 macsec_sa = mlx5e_rx_sc->rx_sa[i];
/* Skip empty slots and inactive SAs. */
1488 if (!macsec_sa || !macsec_sa->active)
1490 if (macsec_sa->macsec_obj_id == obj_id)
/* Advance the software EPN window state from the hardware-reported
 * mode_param, push the new epn_msb/overlap to the device object, and
 * re-arm the EPN event so the next window crossing raises another event.
 */
1499 static void macsec_epn_update(struct mlx5e_macsec *macsec, struct mlx5_core_dev *mdev,
1500 struct mlx5e_macsec_sa *sa, u32 obj_id, u32 mode_param)
1502 struct mlx5_macsec_obj_attrs attrs = {};
1503 struct mlx5e_macsec_aso_in in = {};
1505 /* When the bottom of the replay protection window (mode_param) crosses 2^31 (half sequence
1506 * number wraparound) hence mode_param > MLX5_MACSEC_EPN_SCOPE_MID the SW should update the
1507 * esn_overlap to OLD (1).
1508 * When the bottom of the replay protection window (mode_param) crosses 2^32 (full sequence
1509 * number wraparound) hence mode_param < MLX5_MACSEC_EPN_SCOPE_MID since it did a
1510 * wraparound, the SW should update the esn_overlap to NEW (0), and increment the esn_msb.
1513 if (mode_param < MLX5_MACSEC_EPN_SCOPE_MID) {
1514 sa->epn_state.epn_msb++;
1515 sa->epn_state.overlap = 0;
1517 sa->epn_state.overlap = 1;
/* Propagate the new EPN state to the hardware object. */
1520 macsec_build_accel_attrs(sa, &attrs);
1521 mlx5e_macsec_modify_obj(mdev, &attrs, obj_id);
1523 /* Re-set EPN arm event */
/* NOTE(review): in.obj_id assignment is elided in this listing — confirm
 * it is set before the arm-event call in the full source. */
1525 in.mode = MLX5_MACSEC_EPN;
1526 macsec_aso_set_arm_event(mdev, macsec, &in);
/* Workqueue handler for a MACsec object-change event: under macsec->lock,
 * resolve the SA from the event's object id (TX first, then RX), query the
 * ASO context, and if EPN is enabled and the arm bit has fired (cleared),
 * advance the EPN window.  The async_work allocation is presumably freed
 * at the elided out label — confirm against the full source.
 */
1529 static void macsec_async_event(struct work_struct *work)
1531 struct mlx5e_macsec_async_work *async_work;
1532 struct mlx5e_macsec_aso_out out = {};
1533 struct mlx5e_macsec_aso_in in = {};
1534 struct mlx5e_macsec_sa *macsec_sa;
1535 struct mlx5e_macsec *macsec;
1536 struct mlx5_core_dev *mdev;
1539 async_work = container_of(work, struct mlx5e_macsec_async_work, work);
1540 macsec = async_work->macsec;
1541 mutex_lock(&macsec->lock);
1543 mdev = async_work->mdev;
1544 obj_id = async_work->obj_id;
/* The object id may belong to either a TX or an RX SA. */
1545 macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id);
1547 macsec_sa = get_macsec_rx_sa_from_obj_id(macsec, obj_id);
1549 mlx5_core_dbg(mdev, "MACsec SA is not found (SA object id %d)\n", obj_id);
1550 goto out_async_work;
1554 /* Query MACsec ASO context */
1556 macsec_aso_query(mdev, macsec, &in, &out);
/* EPN event fired: the arm bit is no longer set. */
1559 if (macsec_sa->epn_state.epn_enabled && !(out.event_arm & MLX5E_ASO_EPN_ARM))
1560 macsec_epn_update(macsec, mdev, macsec_sa, obj_id, out.mode_param);
1564 mutex_unlock(&macsec->lock);
/* mlx5 event-notifier callback: filter OBJECT_CHANGE events for MACsec
 * objects and defer the real work to the ordered workqueue.  Runs in
 * notifier (atomic) context, hence GFP_ATOMIC and no locking here.
 */
1567 static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data)
1569 struct mlx5e_macsec *macsec = container_of(nb, struct mlx5e_macsec, nb);
1570 struct mlx5e_macsec_async_work *async_work;
1571 struct mlx5_eqe_obj_change *obj_change;
1572 struct mlx5_eqe *eqe = data;
1576 if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
/* EQE fields are big-endian on the wire. */
1579 obj_change = &eqe->data.obj_change;
1580 obj_type = be16_to_cpu(obj_change->obj_type);
1581 obj_id = be32_to_cpu(obj_change->obj_id);
1583 if (obj_type != MLX5_GENERAL_OBJECT_TYPES_MACSEC)
1586 async_work = kzalloc(sizeof(*async_work), GFP_ATOMIC);
1590 async_work->macsec = macsec;
1591 async_work->mdev = macsec->mdev;
1592 async_work->obj_id = obj_id;
1594 INIT_WORK(&async_work->work, macsec_async_event);
/* macsec->wq is ordered, so events are handled one at a time, in order. */
1596 WARN_ON(!queue_work(macsec->wq, &async_work->work));
/* Set up the MACsec ASO machinery: allocate a PD, create the ASO SQ/CQ
 * object, register the UMR memory region used for context reads, and init
 * the serializing lock.  The visible tail lines are the error-unwind path
 * (destroy ASO, dealloc PD); intermediate labels are elided in this listing.
 */
1601 static int mlx5e_macsec_aso_init(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
1603 struct mlx5_aso *maso;
1606 err = mlx5_core_alloc_pd(mdev, &aso->pdn);
1609 "MACsec offload: Failed to alloc pd for MACsec ASO, err=%d\n",
1614 maso = mlx5_aso_create(mdev, aso->pdn);
1616 err = PTR_ERR(maso);
1620 err = mlx5e_macsec_aso_reg_mr(mdev, aso);
1624 mutex_init(&aso->aso_lock);
1631 mlx5_aso_destroy(maso);
1633 mlx5_core_dealloc_pd(mdev, aso->pdn);
/* Tear down the ASO machinery in reverse order of mlx5e_macsec_aso_init():
 * deregister the UMR MR, destroy the ASO object, free the PD.
 */
1637 static void mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
1642 mlx5e_macsec_aso_dereg_mr(mdev, aso);
1644 mlx5_aso_destroy(aso->maso);
1646 mlx5_core_dealloc_pd(mdev, aso->pdn);
/* Capability gate for MACsec offload: the device must expose the MACsec
 * general object, DEK support, RX decrypt + MACsec reformat-remove,
 * TX encrypt + MACsec reformat-add, and at least one AES-GCM key size
 * (128 or 256) in each direction.  Any missing capability disables offload.
 */
1649 bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev)
1651 if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
1652 MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD))
1655 if (!MLX5_CAP_GEN(mdev, log_max_dek))
1658 if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload))
1661 if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) ||
1662 !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec))
1665 if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) ||
1666 !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec))
1669 if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) &&
1670 !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt))
1673 if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) &&
1674 !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt))
/* Thin wrapper: delegate stats collection to the flow-steering layer. */
1680 void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats)
1682 mlx5e_macsec_fs_get_stats_fill(macsec->macsec_fs, macsec_stats);
/* Accessor for the cached stats structure.  Lines between the signature
 * and the return are elided here — likely a NULL/validity check; confirm
 * against the full source.
 */
1685 struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec)
1690 return &macsec->stats;
/* Offload hooks registered with the kernel MACsec stack (netdev->macsec_ops):
 * add/update/delete for TX SAs, RX SCs, RX SAs, and SecYs.
 */
1693 static const struct macsec_ops macsec_offload_ops = {
1694 .mdo_add_txsa = mlx5e_macsec_add_txsa,
1695 .mdo_upd_txsa = mlx5e_macsec_upd_txsa,
1696 .mdo_del_txsa = mlx5e_macsec_del_txsa,
1697 .mdo_add_rxsc = mlx5e_macsec_add_rxsc,
1698 .mdo_upd_rxsc = mlx5e_macsec_upd_rxsc,
1699 .mdo_del_rxsc = mlx5e_macsec_del_rxsc,
1700 .mdo_add_rxsa = mlx5e_macsec_add_rxsa,
1701 .mdo_upd_rxsa = mlx5e_macsec_upd_rxsa,
1702 .mdo_del_rxsa = mlx5e_macsec_del_rxsa,
1703 .mdo_add_secy = mlx5e_macsec_add_secy,
1704 .mdo_upd_secy = mlx5e_macsec_upd_secy,
1705 .mdo_del_secy = mlx5e_macsec_del_secy,
/* TX fast path: map the skb's SCI (carried in its metadata dst) to a flow
 * steering id via the SCI hash table.  The elided branch drops the skb
 * when the lookup fails — presumably returning false so the caller stops
 * processing; confirm against the full source.
 */
1708 bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
1710 struct metadata_dst *md_dst = skb_metadata_dst(skb);
1713 fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
1720 dev_kfree_skb_any(skb);
/* Stamp the WQE Ethernet segment with MACsec flow-table metadata so the
 * NIC steers this packet to the right MACsec TX rule: the MACSEC flag
 * ORed with the SCI-derived fs_id shifted into its metadata field.
 */
1724 void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
1725 struct sk_buff *skb,
1726 struct mlx5_wqe_eth_seg *eseg)
1728 struct metadata_dst *md_dst = skb_metadata_dst(skb);
1731 fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
1735 eseg->flow_table_metadata = cpu_to_be32(MLX5_ETH_WQE_FT_META_MACSEC | fs_id << 2);
/* RX path: recover the RX SC from the CQE's flow-table metadata and attach
 * its metadata dst to the skb so the MACsec stack can associate the packet
 * with the right SecY.  dst_hold() takes a reference that the dst layer
 * releases later.  Elided lines presumably guard the early-exit cases
 * (no macsec, metadata not MACsec-tagged, xarray miss) — confirm.
 */
1738 void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
1739 struct sk_buff *skb,
1740 struct mlx5_cqe64 *cqe)
1742 struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
1743 u32 macsec_meta_data = be32_to_cpu(cqe->ft_metadata);
1744 struct mlx5e_priv *priv = netdev_priv(netdev);
1745 struct mlx5e_macsec_rx_sc *rx_sc;
1746 struct mlx5e_macsec *macsec;
1749 macsec = priv->macsec;
/* fs_id was encoded into the steering metadata at rule-creation time. */
1753 fs_id = MLX5_MACSEC_RX_METADAT_HANDLE(macsec_meta_data);
1756 sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id);
1757 rx_sc = sc_xarray_element->rx_sc;
1759 dst_hold(&rx_sc->md_dst->dst);
1760 skb_dst_set(skb, &rx_sc->md_dst->dst);
/* Advertise MACsec hardware offload on the netdev when the device supports
 * it: install the offload ops, set NETIF_F_HW_MACSEC, and keep dst entries
 * on TX skbs (the SCI travels in the skb's metadata dst — see
 * mlx5e_macsec_tx_build_eseg()).
 */
1766 void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv)
1768 struct net_device *netdev = priv->netdev;
1770 if (!mlx5e_is_macsec_device(priv->mdev))
1774 mlx5_core_dbg(priv->mdev, "mlx5e: MACsec acceleration enabled\n");
1775 netdev->macsec_ops = &macsec_offload_ops;
1776 netdev->features |= NETIF_F_HW_MACSEC;
1777 netif_keep_dst(netdev);
/* Bring up MACsec offload for a netdev: allocate the per-priv context and
 * initialize, in order, the device list + lock, the SCI hash table, the
 * ASO machinery, an ordered workqueue (serializes async object events),
 * the RX-SC xarray, the flow-steering tables, and finally the mlx5 event
 * notifier.  The visible tail lines are the error-unwind path; the goto
 * labels themselves are elided in this listing.
 */
1780 int mlx5e_macsec_init(struct mlx5e_priv *priv)
1782 struct mlx5_core_dev *mdev = priv->mdev;
1783 struct mlx5e_macsec *macsec = NULL;
1784 struct mlx5e_macsec_fs *macsec_fs;
1787 if (!mlx5e_is_macsec_device(priv->mdev)) {
1788 mlx5_core_dbg(mdev, "Not a MACsec offload device\n");
1792 macsec = kzalloc(sizeof(*macsec), GFP_KERNEL);
1796 INIT_LIST_HEAD(&macsec->macsec_device_list_head);
1797 mutex_init(&macsec->lock);
1799 err = rhashtable_init(&macsec->sci_hash, &rhash_sci);
1801 mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n",
1806 err = mlx5e_macsec_aso_init(&macsec->aso, priv->mdev);
1808 mlx5_core_err(mdev, "MACsec offload: Failed to init aso, err=%d\n", err);
/* Ordered queue: object-change events are processed one at a time. */
1812 macsec->wq = alloc_ordered_workqueue("mlx5e_macsec_%s", 0, priv->netdev->name);
/* XA_FLAGS_ALLOC1: fs_ids are allocated starting from 1, never 0. */
1818 xa_init_flags(&macsec->sc_xarray, XA_FLAGS_ALLOC1);
1820 priv->macsec = macsec;
1822 macsec->mdev = mdev;
1824 macsec_fs = mlx5e_macsec_fs_init(mdev, priv->netdev);
1830 macsec->macsec_fs = macsec_fs;
/* Register last, once everything the handler touches is initialized. */
1832 macsec->nb.notifier_call = macsec_obj_change_event;
1833 mlx5_notifier_register(mdev, &macsec->nb);
1835 mlx5_core_dbg(mdev, "MACsec attached to netdevice\n");
/* Error unwind (labels elided): undo in reverse order of setup. */
1840 destroy_workqueue(macsec->wq);
1842 mlx5e_macsec_aso_cleanup(&macsec->aso, priv->mdev);
1844 rhashtable_destroy(&macsec->sci_hash);
1847 priv->macsec = NULL;
1851 void mlx5e_macsec_cleanup(struct mlx5e_priv *priv)
1853 struct mlx5e_macsec *macsec = priv->macsec;
1854 struct mlx5_core_dev *mdev = priv->mdev;
1859 mlx5_notifier_unregister(mdev, &macsec->nb);
1860 mlx5e_macsec_fs_cleanup(macsec->macsec_fs);
1861 destroy_workqueue(macsec->wq);
1862 mlx5e_macsec_aso_cleanup(&macsec->aso, mdev);
1863 rhashtable_destroy(&macsec->sci_hash);
1864 mutex_destroy(&macsec->lock);