// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include <linux/tls.h>

#include "en.h"
#include "en_accel/ktls.h"
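
/* TX kTLS device offload: builds and posts the control WQEs (static and
 * progress params) that configure a TIS for TLS offload, and resynchronizes
 * the device on out-of-order/retransmitted SKBs via DUMP WQEs.
 */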

enum {
	MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2 = 0x2,
};

enum {
	MLX5E_ENCRYPTION_STANDARD_TLS = 0x1,
};

#define EXTRACT_INFO_FIELDS do { \
	salt       = info->salt;    \
	rec_seq    = info->rec_seq; \
	salt_sz    = sizeof(info->salt);    \
	rec_seq_sz = sizeof(info->rec_seq); \
} while (0)
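
/* Fill the TIS static params context: the GCM IV (salt), the initial
 * record number, the TLS version, and the DEK index of the hardware key.
 */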
static void
fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *info;
	char *initial_rn, *gcm_iv;
	u16 salt_sz, rec_seq_sz;
	char *salt, *rec_seq;
	u8 tls_version;

	if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
		return;

	info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
	EXTRACT_INFO_FIELDS;

	gcm_iv     = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
	initial_rn = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number);

	memcpy(gcm_iv,     salt,    salt_sz);
	memcpy(initial_rn, rec_seq, rec_seq_sz);

	tls_version = MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2;

	MLX5_SET(tls_static_params, ctx, tls_version, tls_version);
	MLX5_SET(tls_static_params, ctx, const_1, 1);
	MLX5_SET(tls_static_params, ctx, const_2, 2);
	MLX5_SET(tls_static_params, ctx, encryption_standard,
		 MLX5E_ENCRYPTION_STANDARD_TLS);
	MLX5_SET(tls_static_params, ctx, dek_index, priv_tx->key_id);
}
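
/* Build the UMR WQE that carries the static params context inline
 * (UMR opcode with the TLS_TIS_STATIC_PARAMS opcode modifier).
 */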
static void
build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
		    struct mlx5e_ktls_offload_context_tx *priv_tx,
		    bool fence)
{
	struct mlx5_wqe_ctrl_seg     *cseg  = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;

#define STATIC_PARAMS_DS_CNT \
	DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_DS)

	cseg->opmod_idx_opcode = cpu_to_be32((pc << 8) | MLX5_OPCODE_UMR |
					     (MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS << 24));
	cseg->qpn_ds           = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
					     STATIC_PARAMS_DS_CNT);
	cseg->fm_ce_se         = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
	cseg->imm              = cpu_to_be32(priv_tx->tisn);

	ucseg->flags = MLX5_UMR_INLINE;
	ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);

	fill_static_params_ctx(wqe->tls_static_params_ctx, priv_tx);
}
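
/* Fill the TIS progress params context: the record tracker starts at the
 * beginning of a record, with authentication in the no-offload state.
 */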
static void
fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	MLX5_SET(tls_progress_params, ctx, pd, priv_tx->tisn);
	MLX5_SET(tls_progress_params, ctx, record_tracker_state,
		 MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);
	MLX5_SET(tls_progress_params, ctx, auth_state,
		 MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD);
}
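
/* Build the SET_PSV WQE that carries the progress params context
 * (TLS_TIS_PROGRESS_PARAMS opcode modifier).
 */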
static void
build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
		      struct mlx5e_ktls_offload_context_tx *priv_tx,
		      bool fence)
{
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

#define PROGRESS_PARAMS_DS_CNT \
	DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_DS)

	cseg->opmod_idx_opcode =
		cpu_to_be32((pc << 8) | MLX5_OPCODE_SET_PSV |
			    (MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS << 24));
	cseg->qpn_ds           = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
					     PROGRESS_PARAMS_DS_CNT);
	cseg->fm_ce_se         = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;

	fill_progress_params_ctx(wqe->data, priv_tx);
}
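
/* Record bookkeeping for a posted WQE so completion processing knows how
 * many WQEBBs to free and whether a dump frag reference must be released.
 */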
static void tx_fill_wi(struct mlx5e_txqsq *sq,
		       u16 pi, u8 num_wqebbs,
		       skb_frag_t *resync_dump_frag)
{
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	wi->skb              = NULL;
	wi->num_wqebbs       = num_wqebbs;
	wi->resync_dump_frag = resync_dump_frag;
}
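
/* The post-pending flag defers posting the params WQEs from connection
 * setup to the first transmit on the SQ, in the TX datapath context.
 */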
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	priv_tx->ctx_post_pending = true;
}

static bool
mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	bool ret = priv_tx->ctx_post_pending;

	priv_tx->ctx_post_pending = false;

	return ret;
}

static void
post_static_params(struct mlx5e_txqsq *sq,
		   struct mlx5e_ktls_offload_context_tx *priv_tx,
		   bool fence)
{
	struct mlx5e_umr_wqe *umr_wqe;
	u16 pi;

	umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
	build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
	tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL);
	sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
}

static void
post_progress_params(struct mlx5e_txqsq *sq,
		     struct mlx5e_ktls_offload_context_tx *priv_tx,
		     bool fence)
{
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
	build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
	tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL);
	sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
}
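
/* Post the pair of params WQEs. The progress WQE carries a fence unless
 * the static WQE was just posted with one, so the progress params never
 * execute ahead of the WQEs preceding them.
 */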
static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
			      struct mlx5e_ktls_offload_context_tx *priv_tx,
			      bool skip_static_post, bool fence_first_post)
{
	bool progress_fence = skip_static_post || !fence_first_post;

	if (!skip_static_post)
		post_static_params(sq, priv_tx, fence_first_post);

	post_progress_params(sq, priv_tx, progress_fence);
}

struct tx_sync_info {
	u64 rcd_sn;
	s32 sync_len;
	int nr_frags;
	skb_frag_t *frags[MAX_SKB_FRAGS];
};
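
/* Look up the TLS record that contains tcp_seq and collect the frags
 * holding the record bytes that precede it; these must be replayed to
 * the device (as DUMP WQEs) so it can resynchronize its crypto state.
 */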
static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
			     u32 tcp_seq, struct tx_sync_info *info)
{
	struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
	struct tls_record_info *record;
	int remaining, i = 0;
	unsigned long flags;
	bool ret = true;

	spin_lock_irqsave(&tx_ctx->lock, flags);
	record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);

	if (unlikely(!record)) {
		ret = false;
		goto out;
	}

	if (unlikely(tcp_seq < tls_record_start_seq(record))) {
		if (!tls_record_is_start_marker(record))
			ret = false;
		goto out;
	}

	info->sync_len = tcp_seq - tls_record_start_seq(record);
	remaining = info->sync_len;
	while (remaining > 0) {
		skb_frag_t *frag = &record->frags[i];

		__skb_frag_ref(frag);
		remaining -= skb_frag_size(frag);
		info->frags[i++] = frag;
	}
	/* reduce the part which will be sent with the original SKB */
	if (remaining < 0)
		skb_frag_size_add(info->frags[i - 1], remaining);
	info->nr_frags = i;
out:
	spin_unlock_irqrestore(&tx_ctx->lock, flags);
	return ret;
}
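
/* Repost the device params for a resync: if the record number did not
 * change, the static params are still valid and only the progress
 * params need reposting.
 */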
static void
tx_post_resync_params(struct mlx5e_txqsq *sq,
		      struct mlx5e_ktls_offload_context_tx *priv_tx,
		      u64 rcd_sn)
{
	struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *info;
	__be64 rn_be = cpu_to_be64(rcd_sn);
	bool skip_static_post;
	u16 rec_seq_sz;
	char *rec_seq;

	if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
		return;

	info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
	rec_seq = info->rec_seq;
	rec_seq_sz = sizeof(info->rec_seq);

	skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
	if (!skip_static_post)
		memcpy(rec_seq, &rn_be, rec_seq_sz);

	mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}
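
/* Post a DUMP WQE: feed one frag of previously-sent record data back
 * through the TIS so the device can advance its crypto state over it.
 */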
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		    skb_frag_t *frag, u32 tisn, bool first)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg  *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe *wqe;
	dma_addr_t dma_addr = 0;
	u16 ds_cnt, ds_cnt_inl;
	u8  num_wqebbs;
	u16 pi, ihs;
	int fsz;

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	ihs    = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
	ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
	ds_cnt += ds_cnt_inl;
	ds_cnt += 1; /* one frag */

	wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);

	num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);

	cseg = &wqe->ctrl;
	eseg = &wqe->eth;
	dseg =  wqe->data;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8)  | MLX5_OPCODE_DUMP);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	cseg->imm              = cpu_to_be32(tisn);
	cseg->fm_ce_se         = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;

	eseg->inline_hdr.sz = cpu_to_be16(ihs);
	memcpy(eseg->inline_hdr.start, skb->data, ihs);
	dseg += ds_cnt_inl;

	fsz = skb_frag_size(frag);
	dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
				    DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
		return -ENOMEM;

	dseg->addr       = cpu_to_be64(dma_addr);
	dseg->lkey       = sq->mkey_be;
	dseg->byte_count = cpu_to_be32(fsz);
	mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);

	tx_fill_wi(sq, pi, num_wqebbs, frag);
	sq->pc += num_wqebbs;

	WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
	     "unexpected DUMP num_wqebbs, %d > %d",
	     num_wqebbs, MLX5E_KTLS_MAX_DUMP_WQEBBS);

	return 0;
}
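
/* Completion handler for DUMP WQEs: unmap the DMA and release the frag
 * reference taken in tx_sync_info_get(), then count the dump traffic.
 */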
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
					   struct mlx5e_tx_wqe_info *wi,
					   struct mlx5e_sq_dma *dma)
{
	struct mlx5e_sq_stats *stats = sq->stats;

	mlx5e_tx_dma_unmap(sq->pdev, dma);
	__skb_frag_unref(wi->resync_dump_frag);
	stats->tls_dump_packets++;
	stats->tls_dump_bytes += wi->num_bytes;
}

static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	tx_fill_wi(sq, pi, 1, NULL);

	mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}
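
/* Handle an SKB whose TCP sequence does not match the expected one:
 * either it needs no offload at all (pure retransmission of data the
 * device already passed), or the device must be resynchronized by
 * reposting the params and dumping the start of the current record.
 */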
static struct sk_buff *
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
			 struct mlx5e_txqsq *sq,
			 struct sk_buff *skb,
			 u32 seq)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct tx_sync_info info = {};
	u16 contig_wqebbs_room, pi;
	u8 num_wqebbs;
	int i;

	if (!tx_sync_info_get(priv_tx, seq, &info)) {
		/* We might get here if a retransmission reaches the driver
		 * after the relevant record is acked.
		 * It should be safe to drop the packet in this case
		 */
		stats->tls_drop_no_sync_data++;
		goto err_out;
	}

	if (unlikely(info.sync_len < 0)) {
		u32 payload;
		int headln;

		headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
		payload = skb->len - headln;
		if (likely(payload <= -info.sync_len))
			/* SKB payload doesn't require offload */
			return skb;

		stats->tls_drop_bypass_req++;
		goto err_out;
	}

	stats->tls_ooo++;

	num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS +
		(info.nr_frags ? info.nr_frags * MLX5E_KTLS_MAX_DUMP_WQEBBS : 1);
	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs_room < num_wqebbs))
		mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);

	tx_post_resync_params(sq, priv_tx, info.rcd_sn);

	for (i = 0; i < info.nr_frags; i++)
		if (tx_post_resync_dump(sq, skb, info.frags[i],
					priv_tx->tisn, !i))
			goto err_out;

	/* If no dump WQE was sent, we need to have a fence NOP WQE before the
	 * actual data xmit.
	 */
	if (!info.nr_frags)
		tx_post_fence_nop(sq);

	return skb;

err_out:
	dev_kfree_skb_any(skb);
	return NULL;
}
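
/* TX datapath entry point for kTLS SKBs: posts pending params WQEs,
 * resynchronizes on out-of-order sequences, and points the data WQE at
 * the connection's TIS. Returns the SKB to transmit, or NULL if it was
 * consumed.
 */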
struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
					 struct mlx5e_txqsq *sq,
					 struct sk_buff *skb,
					 struct mlx5e_tx_wqe **wqe, u16 *pi)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_sq_stats *stats = sq->stats;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct tls_context *tls_ctx;
	int datalen;
	u32 seq;

	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
		goto out;

	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
	if (!datalen)
		goto out;

	tls_ctx = tls_get_ctx(skb->sk);
	if (unlikely(WARN_ON_ONCE(tls_ctx->netdev != netdev)))
		goto err_out;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);

	if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
		mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
		*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
		stats->tls_ctx++;
	}

	seq = ntohl(tcp_hdr(skb)->seq);
	if (unlikely(priv_tx->expected_seq != seq)) {
		skb = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, skb, seq);
		if (unlikely(!skb))
			goto out;
		*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
	}

	priv_tx->expected_seq = seq + datalen;

	cseg = &(*wqe)->ctrl;
	cseg->imm = cpu_to_be32(priv_tx->tisn);

	stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	stats->tls_encrypted_bytes   += datalen;

out:
	return skb;

err_out:
	dev_kfree_skb_any(skb);
	return NULL;
}