// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"

struct mlx5e_dump_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_data_seg data;
};

#define MLX5E_KTLS_DUMP_WQEBBS \
	(DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))

static u8
mlx5e_ktls_dumps_num_wqes(struct mlx5e_txqsq *sq, unsigned int nfrags,
			  unsigned int sync_len)
{
	/* Given the MTU and sync_len, calculates an upper bound for the
	 * number of DUMP WQEs needed for the TX resync of a record.
	 */
	return nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu);
}

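/* Worst-case room a single kTLS TX resync may consume in the SQ: the static
 * and progress params WQEs plus the maximal number of DUMP WQEs, so the SQ
 * can be stopped early enough to always fit them.
 */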
u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq)
{
	u16 num_dumps, stop_room = 0;

	num_dumps = mlx5e_ktls_dumps_num_wqes(sq, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);

	stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
	stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
	stop_room += num_dumps * mlx5e_stop_room_for_wqe(MLX5E_KTLS_DUMP_WQEBBS);

	return stop_room;
}

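/* Create a TIS (transport interface send) object with TLS offload enabled.
 * Each offloaded TX connection gets its own TIS.
 */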
static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
	void *tisc;

	tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, tls_en, 1);

	return mlx5e_create_tis(mdev, in, tisn);
}

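/* Per-connection TX offload state: a copy of the crypto info (used for
 * resync), the expected TCP sequence number, the HW key id and TIS number,
 * and a flag marking that the params WQEs still need to be posted.
 */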
struct mlx5e_ktls_offload_context_tx {
	struct tls_offload_context_tx *tx_ctx;
	struct tls12_crypto_info_aes_gcm_128 crypto_info;
	u32 expected_seq;
	u32 tisn;
	u32 key_id;
	bool ctx_post_pending;
};

static void
mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
			   struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	struct mlx5e_ktls_offload_context_tx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

	BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_tx *) >
		     TLS_OFFLOAD_CONTEXT_SIZE_TX);

	*ctx = priv_tx;
}

static struct mlx5e_ktls_offload_context_tx *
mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_tx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

	return *ctx;
}

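/* Install TX kTLS offload for a connection: create the HW crypto key and a
 * dedicated TIS, and stash the offload context in the TLS driver context so
 * the datapath can find it per-skb.
 */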
int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
		      struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct tls_context *tls_ctx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;
	int err;

	tls_ctx = tls_get_ctx(sk);
	priv = netdev_priv(netdev);
	mdev = priv->mdev;

	priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
	if (!priv_tx)
		return -ENOMEM;

	err = mlx5_ktls_create_key(mdev, crypto_info, &priv_tx->key_id);
	if (err)
		goto err_create_key;

	priv_tx->expected_seq = start_offload_tcp_sn;
	priv_tx->crypto_info  =
		*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
	priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);

	mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);

	err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
	if (err)
		goto err_create_tis;

	priv_tx->ctx_post_pending = true;

	return 0;

err_create_tis:
	mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
err_create_key:
	kfree(priv_tx);
	return err;
}

void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
	priv = netdev_priv(netdev);
	mdev = priv->mdev;

	mlx5e_destroy_tis(mdev, priv_tx->tisn);
	mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
	kfree(priv_tx);
}

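/* Fill the wqe_info entry of a posted WQE so completion handling knows how
 * many WQEBBs and bytes it covered and, for DUMP WQEs, which page to release.
 */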
static void tx_fill_wi(struct mlx5e_txqsq *sq,
		       u16 pi, u8 num_wqebbs, u32 num_bytes,
		       struct page *page)
{
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	*wi = (struct mlx5e_tx_wqe_info) {
		.num_wqebbs = num_wqebbs,
		.num_bytes  = num_bytes,
		.resync_dump_frag_page = page,
	};
}

static bool
mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	bool ret = priv_tx->ctx_post_pending;

	priv_tx->ctx_post_pending = false;

	return ret;
}

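/* Post a SET_STATIC_PARAMS WQE, passing the connection's crypto info, TIS
 * and key id to the device.
 */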
static void
post_static_params(struct mlx5e_txqsq *sq,
		   struct mlx5e_ktls_offload_context_tx *priv_tx,
		   bool fence)
{
	struct mlx5e_set_tls_static_params_wqe *wqe;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info,
				       priv_tx->tisn, priv_tx->key_id, 0, fence,
				       TLS_OFFLOAD_CTX_DIR_TX);
	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
	sq->pc += num_wqebbs;
}

static void
post_progress_params(struct mlx5e_txqsq *sq,
		     struct mlx5e_ktls_offload_context_tx *priv_tx,
		     bool fence)
{
	struct mlx5e_set_tls_progress_params_wqe *wqe;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_tx->tisn, fence, 0,
					 TLS_OFFLOAD_CTX_DIR_TX);
	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
	sq->pc += num_wqebbs;
}

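/* Post the static (unless skipped) and progress params WQEs with the
 * requested fencing.
 */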
static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
			      struct mlx5e_ktls_offload_context_tx *priv_tx,
			      bool skip_static_post, bool fence_first_post)
{
	bool progress_fence = skip_static_post || !fence_first_post;

	if (!skip_static_post)
		post_static_params(sq, priv_tx, fence_first_post);

	post_progress_params(sq, priv_tx, progress_fence);
}

struct tx_sync_info {
	u64 rcd_sn;
	u32 sync_len;
	int nr_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

enum mlx5e_ktls_sync_retval {
	MLX5E_KTLS_SYNC_DONE,
	MLX5E_KTLS_SYNC_FAIL,
	MLX5E_KTLS_SYNC_SKIP_NO_DATA,
};

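/* Look up the TLS record that contains tcp_seq and collect the frags that
 * precede it within the record, taking a page reference on each collected
 * frag. These frags are later DUMPed so the HW can resync its crypto state.
 */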
static enum mlx5e_ktls_sync_retval
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
		 u32 tcp_seq, int datalen, struct tx_sync_info *info)
{
	struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
	enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
	struct tls_record_info *record;
	int remaining, i = 0;
	unsigned long flags;
	bool ends_before;

	spin_lock_irqsave(&tx_ctx->lock, flags);
	record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);

	if (unlikely(!record)) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	/* There are the following cases:
	 * 1. packet ends before start marker: bypass offload.
	 * 2. packet starts before start marker and ends after it: drop,
	 *    not supported, breaks contract with kernel.
	 * 3. packet ends before tls record info starts: drop,
	 *    this packet was already acknowledged and its record info
	 *    was released.
	 */
	ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record));

	if (unlikely(tls_record_is_start_marker(record))) {
		ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
		goto out;
	} else if (ends_before) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	info->sync_len = tcp_seq - tls_record_start_seq(record);
	remaining = info->sync_len;
	while (remaining > 0) {
		skb_frag_t *frag = &record->frags[i];

		get_page(skb_frag_page(frag));
		remaining -= skb_frag_size(frag);
		info->frags[i++] = *frag;
	}
	/* reduce the part which will be sent with the original SKB */
	if (remaining < 0)
		skb_frag_size_add(&info->frags[i - 1], remaining);
	info->nr_frags = i;
out:
	spin_unlock_irqrestore(&tx_ctx->lock, flags);
	return ret;
}

static void
tx_post_resync_params(struct mlx5e_txqsq *sq,
		      struct mlx5e_ktls_offload_context_tx *priv_tx,
		      u64 rcd_sn)
{
	struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
	__be64 rn_be = cpu_to_be64(rcd_sn);
	bool skip_static_post;
	u16 rec_seq_sz;
	char *rec_seq;

	rec_seq = info->rec_seq;
	rec_seq_sz = sizeof(info->rec_seq);

	/* Skip the static params WQE if the record sequence number is already
	 * up to date.
	 */
	skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
	if (!skip_static_post)
		memcpy(rec_seq, &rn_be, rec_seq_sz);

	mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}

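/* Post one DUMP WQE for a single frag (the caller caps its size at one HW
 * MTU). The frag's page is DMA-mapped and stays referenced until the WQE
 * completes, letting the device advance its crypto state over the part of
 * the record that precedes the retransmitted data.
 */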
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_dump_wqe *wqe;
	dma_addr_t dma_addr = 0;
	u16 ds_cnt;
	int fsz;
	u16 pi;

	BUILD_BUG_ON(MLX5E_KTLS_DUMP_WQEBBS != 1);
	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wqe = MLX5E_TLS_FETCH_DUMP_WQE(sq, pi);

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;

	cseg = &wqe->ctrl;
	dseg = &wqe->data;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8)  | MLX5_OPCODE_DUMP);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	cseg->tis_tir_num      = cpu_to_be32(tisn << 8);
	cseg->fm_ce_se         = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;

	fsz = skb_frag_size(frag);
	dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
				    DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
		return -ENOMEM;

	dseg->addr       = cpu_to_be64(dma_addr);
	dseg->lkey       = sq->mkey_be;
	dseg->byte_count = cpu_to_be32(fsz);
	mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);

	tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
	sq->pc += MLX5E_KTLS_DUMP_WQEBBS;

	return 0;
}

void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
					   struct mlx5e_tx_wqe_info *wi,
					   u32 *dma_fifo_cc)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_sq_dma *dma;

	if (!wi->resync_dump_frag_page)
		return;

	dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
	stats = sq->stats;

	mlx5e_tx_dma_unmap(sq->pdev, dma);
	put_page(wi->resync_dump_frag_page);
	stats->tls_dump_packets++;
	stats->tls_dump_bytes += wi->num_bytes;
}

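/* Post a single fenced NOP WQE; used when the resync posted no DUMP WQE, so
 * the params WQEs are still fenced before the actual data transmit.
 */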
static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	tx_fill_wi(sq, pi, 1, 0, NULL);

	mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}

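/* Handle a TX skb whose TCP sequence does not match the expected one (e.g. a
 * retransmission): re-post the params WQEs and DUMP the part of the record
 * that precedes the skb so the HW can rebuild its crypto state.
 */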
static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
			 struct mlx5e_txqsq *sq,
			 int datalen,
			 u32 seq)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	enum mlx5e_ktls_sync_retval ret;
	struct tx_sync_info info = {};
	int i = 0;

	ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
	if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
		if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
			stats->tls_skip_no_sync_data++;
			return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
		}
		/* We might get here if a retransmission reaches the driver
		 * after the relevant record is acked.
		 * It should be safe to drop the packet in this case
		 */
		stats->tls_drop_no_sync_data++;
		goto err_out;
	}

	tx_post_resync_params(sq, priv_tx, info.rcd_sn);

	/* If no dump WQE was sent, we need to have a fence NOP WQE before the
	 * actual data xmit.
	 */
	if (!info.nr_frags) {
		tx_post_fence_nop(sq);
		return MLX5E_KTLS_SYNC_DONE;
	}

	for (; i < info.nr_frags; i++) {
		unsigned int orig_fsz, frag_offset = 0, n = 0;
		skb_frag_t *f = &info.frags[i];

		orig_fsz = skb_frag_size(f);

		do {
			bool fence = !(i || frag_offset);
			unsigned int fsz;

			n++;
			fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
			skb_frag_size_set(f, fsz);
			if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
				page_ref_add(skb_frag_page(f), n - 1);
				goto err_out;
			}

			skb_frag_off_add(f, fsz);
			frag_offset += fsz;
		} while (frag_offset < orig_fsz);

		page_ref_add(skb_frag_page(f), n - 1);
	}

	return MLX5E_KTLS_SYNC_DONE;

err_out:
	for (; i < info.nr_frags; i++)
		/* The put_page() here undoes the page ref obtained in tx_sync_info_get().
		 * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
		 * released only upon their completions (or in mlx5e_free_txqsq_descs,
		 * if channel closes).
		 */
		put_page(skb_frag_page(&info.frags[i]));

	return MLX5E_KTLS_SYNC_FAIL;
}

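/* Datapath entry point for a kTLS TX skb: post the params WQEs on the first
 * packet of a connection, trigger a resync when the TCP sequence is not the
 * expected one, point the skb at the connection's TIS, and update stats.
 * Returns false (and frees the skb) on failure.
 */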
bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
			      struct sk_buff *skb, int datalen,
			      struct mlx5e_accel_tx_tls_state *state)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_sq_stats *stats = sq->stats;
	u32 seq;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);

	if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
		mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
	}

	seq = ntohl(tcp_hdr(skb)->seq);
	if (unlikely(priv_tx->expected_seq != seq)) {
		enum mlx5e_ktls_sync_retval ret =
			mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);

		switch (ret) {
		case MLX5E_KTLS_SYNC_DONE:
			break;
		case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
			if (likely(!skb->decrypted))
				goto out;
			WARN_ON_ONCE(1);
			fallthrough;
		case MLX5E_KTLS_SYNC_FAIL:
			goto err_out;
		}
	}

	priv_tx->expected_seq = seq + datalen;

	state->tls_tisn = priv_tx->tisn;

	stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	stats->tls_encrypted_bytes   += datalen;

out:
	return true;

err_out:
	dev_kfree_skb_any(skb);
	return false;
}