2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include <crypto/aead.h>
37 #include "ipsec_offload.h"
38 #include "en_accel/ipsec_rxtx.h"
39 #include "en_accel/ipsec.h"
/* TX completion syndromes reported by HW for IPsec crypto-offloaded
 * ESP packets (with and without TCP LSO).
 */
enum {
	MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
	MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9,
};
47 static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
49 unsigned int alen = crypto_aead_authsize(x->data);
50 struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
51 struct iphdr *ipv4hdr = ip_hdr(skb);
52 unsigned int trailer_len;
56 ret = skb_copy_bits(skb, skb->len - alen - 2, &plen, 1);
60 trailer_len = alen + plen + 2;
62 pskb_trim(skb, skb->len - trailer_len);
63 if (skb->protocol == htons(ETH_P_IP)) {
64 ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
65 ip_send_check(ipv4hdr);
67 ipv6hdr->payload_len = htons(ntohs(ipv6hdr->payload_len) -
73 static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
74 struct mlx5_wqe_eth_seg *eseg, u8 mode,
75 struct xfrm_offload *xo)
78 * SWP: OutL3 InL3 InL4
79 * Pkt: MAC IP ESP IP L4
85 * Tunnel(VXLAN TCP/UDP) over Transport Mode
86 * SWP: OutL3 InL3 InL4
87 * Pkt: MAC IP ESP UDP VXLAN IP L4
91 eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
92 if (skb->protocol == htons(ETH_P_IPV6))
93 eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
96 if (mode == XFRM_MODE_TUNNEL) {
97 eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
98 if (xo->proto == IPPROTO_IPV6)
99 eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
101 switch (xo->inner_ipproto) {
103 eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
106 /* IP | ESP | IP | [TCP | UDP] */
107 eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
116 if (mode != XFRM_MODE_TRANSPORT)
119 if (!xo->inner_ipproto) {
122 eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
126 eseg->swp_outer_l4_offset = skb_inner_transport_offset(skb) / 2;
132 /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
133 switch (xo->inner_ipproto) {
135 eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
138 eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
139 eseg->swp_inner_l4_offset =
140 (skb->csum_start + skb->head - skb->data) / 2;
141 if (inner_ip_hdr(skb)->version == 6)
142 eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
151 void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
152 struct xfrm_offload *xo)
154 struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
155 __u32 oseq = replay_esn->oseq;
160 if (unlikely(skb_is_gso(skb) && oseq < MLX5E_IPSEC_ESN_SCOPE_MID &&
161 MLX5E_IPSEC_ESN_SCOPE_MID < (oseq - skb_shinfo(skb)->gso_segs))) {
162 seq_hi = xo->seq.hi - 1;
167 /* Place the SN in the IV field */
168 seqno = cpu_to_be64(xo->seq.low + ((u64)seq_hi << 32));
169 iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
170 skb_store_bits(skb, iv_offset, &seqno, 8);
173 void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
174 struct xfrm_offload *xo)
179 /* Place the SN in the IV field */
180 seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
181 iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
182 skb_store_bits(skb, iv_offset, &seqno, 8);
185 void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
186 struct mlx5e_accel_tx_ipsec_state *ipsec_st,
187 struct mlx5_wqe_inline_seg *inlseg)
189 inlseg->byte_count = cpu_to_be32(ipsec_st->tailen | MLX5_INLINE_SEG);
190 esp_output_fill_trailer((u8 *)inlseg->data, 0, ipsec_st->plen, ipsec_st->xo->proto);
193 static int mlx5e_ipsec_set_state(struct mlx5e_priv *priv,
195 struct xfrm_state *x,
196 struct xfrm_offload *xo,
197 struct mlx5e_accel_tx_ipsec_state *ipsec_st)
199 unsigned int blksize, clen, alen, plen;
200 struct crypto_aead *aead;
206 alen = crypto_aead_authsize(aead);
207 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
208 clen = ALIGN(skb->len + 2, blksize);
209 plen = max_t(u32, clen - skb->len, 4);
210 tailen = plen + alen;
211 ipsec_st->plen = plen;
212 ipsec_st->tailen = tailen;
217 void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
218 struct mlx5_wqe_eth_seg *eseg)
220 struct xfrm_offload *xo = xfrm_offload(skb);
221 struct xfrm_encap_tmpl *encap;
222 struct xfrm_state *x;
226 sp = skb_sec_path(skb);
227 if (unlikely(sp->len != 1))
230 x = xfrm_input_state(skb);
234 if (unlikely(!x->xso.offload_handle ||
235 (skb->protocol != htons(ETH_P_IP) &&
236 skb->protocol != htons(ETH_P_IPV6))))
239 mlx5e_ipsec_set_swp(skb, eseg, x->props.mode, xo);
241 l3_proto = (x->props.family == AF_INET) ?
242 ((struct iphdr *)skb_network_header(skb))->protocol :
243 ((struct ipv6hdr *)skb_network_header(skb))->nexthdr;
245 eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
246 eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER);
249 eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
250 cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) :
251 cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
252 } else if (encap->encap_type == UDP_ENCAP_ESPINUDP) {
253 eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
254 cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) :
255 cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC);
259 bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
261 struct mlx5e_accel_tx_ipsec_state *ipsec_st)
263 struct mlx5e_priv *priv = netdev_priv(netdev);
264 struct xfrm_offload *xo = xfrm_offload(skb);
265 struct mlx5e_ipsec_sa_entry *sa_entry;
266 struct xfrm_state *x;
269 sp = skb_sec_path(skb);
270 if (unlikely(sp->len != 1)) {
271 atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_bundle);
275 x = xfrm_input_state(skb);
277 atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_no_state);
281 if (unlikely(!x->xso.offload_handle ||
282 (skb->protocol != htons(ETH_P_IP) &&
283 skb->protocol != htons(ETH_P_IPV6)))) {
284 atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_not_ip);
288 if (!skb_is_gso(skb))
289 if (unlikely(mlx5e_ipsec_remove_trailer(skb, x))) {
290 atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_trailer);
294 sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
295 sa_entry->set_iv_op(skb, x, xo);
296 mlx5e_ipsec_set_state(priv, skb, x, xo, ipsec_st);
/* RX syndromes extracted from CQE flow-table metadata for crypto-offloaded
 * ESP packets.
 */
enum {
	MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
	MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
	MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
};
311 void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
313 struct mlx5_cqe64 *cqe)
315 u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
316 struct mlx5e_priv *priv;
317 struct xfrm_offload *xo;
318 struct xfrm_state *xs;
322 sa_handle = MLX5_IPSEC_METADATA_HANDLE(ipsec_meta_data);
323 priv = netdev_priv(netdev);
324 sp = secpath_set(skb);
326 atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
330 xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
332 atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
336 sp = skb_sec_path(skb);
337 sp->xvec[sp->len++] = xs;
340 xo = xfrm_offload(skb);
341 xo->flags = CRYPTO_DONE;
343 switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
344 case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
345 xo->status = CRYPTO_SUCCESS;
347 case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
348 xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
350 case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER:
351 xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
354 atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);