2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #ifndef __MLX5E_IPSEC_RXTX_H__
35 #define __MLX5E_IPSEC_RXTX_H__
37 #include <linux/skbuff.h>
/* Bit31: IPsec marker, Bit30-24: IPsec syndrome, Bit23-0: IPsec obj id */
/* Nonzero when the CQE metadata word is tagged as IPsec (marker bit 31). */
#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
/* 7-bit hardware IPsec syndrome from bits 30-24 of the metadata word. */
#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(6, 0))
/* 24-bit IPsec object (SA) handle from bits 23-0 of the metadata word. */
#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))
47 struct mlx5e_accel_tx_ipsec_state {
48 struct xfrm_offload *xo;
54 #ifdef CONFIG_MLX5_EN_IPSEC
56 struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
57 struct sk_buff *skb, u32 *cqe_bcnt);
59 void mlx5e_ipsec_inverse_table_init(void);
60 void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
61 struct xfrm_offload *xo);
62 void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
63 struct xfrm_offload *xo);
64 bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
66 struct mlx5e_accel_tx_ipsec_state *ipsec_st);
67 void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
68 struct mlx5e_accel_tx_ipsec_state *ipsec_st,
69 struct mlx5_wqe_inline_seg *inlseg);
70 void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
72 struct mlx5_cqe64 *cqe);
73 static inline unsigned int mlx5e_ipsec_tx_ids_len(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
75 return ipsec_st->tailen;
78 static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe)
80 return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
83 static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
88 static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
90 return eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
/* TX: fill the Ethernet segment's IPsec metadata for an offloaded skb. */
void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
			       struct mlx5_wqe_eth_seg *eseg);
96 static inline netdev_features_t
97 mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
99 struct xfrm_offload *xo = xfrm_offload(skb);
100 struct sec_path *sp = skb_sec_path(skb);
102 if (sp && sp->len && xo) {
103 struct xfrm_state *x = sp->xvec[0];
105 if (!x || !x->xso.offload_handle)
108 if (xo->inner_ipproto) {
109 /* Cannot support tunnel packet over IPsec tunnel mode
110 * because we cannot offload three IP header csum
112 if (x->props.mode == XFRM_MODE_TUNNEL)
115 /* Only support UDP or TCP L4 checksum */
116 if (xo->inner_ipproto != IPPROTO_UDP &&
117 xo->inner_ipproto != IPPROTO_TCP)
125 /* Disable CSUM and GSO for software IPsec */
127 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
131 mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
132 struct mlx5_wqe_eth_seg *eseg)
136 if (!mlx5e_ipsec_eseg_meta(eseg))
139 eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
140 inner_ipproto = xfrm_offload(skb)->inner_ipproto;
142 eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
143 if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP)
144 eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
145 } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
146 eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
147 sq->stats->csum_partial_inner++;
/* IPsec compiled out: RX full-offload handling is a no-op. */
static inline
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
				       struct sk_buff *skb,
				       struct mlx5_cqe64 *cqe)
{}
159 static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
164 static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
165 static inline netdev_features_t
166 mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
167 { return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); }
/* IPsec compiled out: IPsec checksum flag setup is a no-op. */
static inline void
mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
				  struct mlx5_wqe_eth_seg *eseg)
{}
175 #endif /* CONFIG_MLX5_EN_IPSEC */
177 #endif /* __MLX5E_IPSEC_RXTX_H__ */