net/mlx5e: Remove unlikely() from WARN*() condition
[linux-2.6-microblaze.git] drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include <linux/tls.h>
#include "en.h"
#include "en/txrx.h"
#include "en_accel/ktls.h"

enum {
        MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2 = 0x2,
};

enum {
        MLX5E_ENCRYPTION_STANDARD_TLS = 0x1,
};

#define EXTRACT_INFO_FIELDS do { \
        salt    = info->salt;    \
        rec_seq = info->rec_seq; \
        salt_sz    = sizeof(info->salt);    \
        rec_seq_sz = sizeof(info->rec_seq); \
} while (0)

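/* Fill the device's static TLS parameters context: copy the GCM salt and
 * initial record number from the AES-GCM-128 crypto info, and set the TLS
 * version, encryption standard and DEK (key) index.
 */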
static void
fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
        struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
        struct tls12_crypto_info_aes_gcm_128 *info;
        char *initial_rn, *gcm_iv;
        u16 salt_sz, rec_seq_sz;
        char *salt, *rec_seq;
        u8 tls_version;

        if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
                return;

        info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
        EXTRACT_INFO_FIELDS;

        gcm_iv      = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
        initial_rn  = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number);

        memcpy(gcm_iv,      salt,    salt_sz);
        memcpy(initial_rn,  rec_seq, rec_seq_sz);

        tls_version = MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2;

        MLX5_SET(tls_static_params, ctx, tls_version, tls_version);
        MLX5_SET(tls_static_params, ctx, const_1, 1);
        MLX5_SET(tls_static_params, ctx, const_2, 2);
        MLX5_SET(tls_static_params, ctx, encryption_standard,
                 MLX5E_ENCRYPTION_STANDARD_TLS);
        MLX5_SET(tls_static_params, ctx, dek_index, priv_tx->key_id);
}

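/* Build a UMR WQE that carries the static TLS parameters inline for the
 * connection's TIS, optionally fenced against previously posted WQEs.
 */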
static void
build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
                    struct mlx5e_ktls_offload_context_tx *priv_tx,
                    bool fence)
{
        struct mlx5_wqe_ctrl_seg     *cseg  = &wqe->ctrl;
        struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;

#define STATIC_PARAMS_DS_CNT \
        DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_DS)

        cseg->opmod_idx_opcode = cpu_to_be32((pc << 8) | MLX5_OPCODE_UMR |
                                             (MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS << 24));
        cseg->qpn_ds           = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
                                             STATIC_PARAMS_DS_CNT);
        cseg->fm_ce_se         = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
        cseg->tisn             = cpu_to_be32(priv_tx->tisn << 8);

        ucseg->flags = MLX5_UMR_INLINE;
        ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);

        fill_static_params_ctx(wqe->tls_static_params_ctx, priv_tx);
}

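/* Initialize the progress parameters context: bind it to the TIS and set the
 * record tracker to the START state with auth_state NO_OFFLOAD.
 */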
static void
fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
        MLX5_SET(tls_progress_params, ctx, tisn, priv_tx->tisn);
        MLX5_SET(tls_progress_params, ctx, record_tracker_state,
                 MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);
        MLX5_SET(tls_progress_params, ctx, auth_state,
                 MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD);
}

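/* Build a SET_PSV WQE that carries the progress parameters for the
 * connection's TIS, optionally fenced.
 */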
static void
build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
                      struct mlx5e_ktls_offload_context_tx *priv_tx,
                      bool fence)
{
        struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

#define PROGRESS_PARAMS_DS_CNT \
        DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_DS)

        cseg->opmod_idx_opcode =
                cpu_to_be32((pc << 8) | MLX5_OPCODE_SET_PSV |
                            (MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS << 24));
        cseg->qpn_ds           = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
                                             PROGRESS_PARAMS_DS_CNT);
        cseg->fm_ce_se         = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;

        fill_progress_params_ctx(wqe->tls_progress_params_ctx, priv_tx);
}

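/* Record bookkeeping info for the WQE posted at producer index @pi so that
 * completion processing knows its size in WQEBBs, its byte count and (for
 * DUMP WQEs) which frag to release.
 */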
static void tx_fill_wi(struct mlx5e_txqsq *sq,
                       u16 pi, u8 num_wqebbs,
                       skb_frag_t *resync_dump_frag,
                       u32 num_bytes)
{
        struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

        wi->skb              = NULL;
        wi->num_wqebbs       = num_wqebbs;
        wi->resync_dump_frag = resync_dump_frag;
        wi->num_bytes        = num_bytes;
}

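/* Mark that the static/progress parameter WQEs still need to be posted for
 * this connection; cleared by the test_and_clear helper below on first use.
 */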
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
        priv_tx->ctx_post_pending = true;
}

static bool
mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
        bool ret = priv_tx->ctx_post_pending;

        priv_tx->ctx_post_pending = false;

        return ret;
}

static void
post_static_params(struct mlx5e_txqsq *sq,
                   struct mlx5e_ktls_offload_context_tx *priv_tx,
                   bool fence)
{
        struct mlx5e_umr_wqe *umr_wqe;
        u16 pi;

        umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
        build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
        tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0);
        sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
}

static void
post_progress_params(struct mlx5e_txqsq *sq,
                     struct mlx5e_ktls_offload_context_tx *priv_tx,
                     bool fence)
{
        struct mlx5e_tx_wqe *wqe;
        u16 pi;

        wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
        build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
        tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0);
        sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
}

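/* Post the static (unless skipped) and progress parameter WQEs. The progress
 * WQE carries a fence unless a fenced static WQE was just posted ahead of it.
 */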
static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
                              struct mlx5e_ktls_offload_context_tx *priv_tx,
                              bool skip_static_post, bool fence_first_post)
{
        bool progress_fence = skip_static_post || !fence_first_post;

        if (!skip_static_post)
                post_static_params(sq, priv_tx, fence_first_post);

        post_progress_params(sq, priv_tx, progress_fence);
}

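/* Resync state derived from the TLS record covering an out-of-order sequence
 * number: the record sequence number, how many bytes of the record precede
 * the packet, and the frags holding those bytes.
 */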
struct tx_sync_info {
        u64 rcd_sn;
        s32 sync_len;
        int nr_frags;
        skb_frag_t *frags[MAX_SKB_FRAGS];
};

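/* Look up, under the TX offload context lock, the TLS record covering
 * @tcp_seq and take references on the frags that hold the part of the record
 * preceding it (trimming the last frag to the exact length). Returns false
 * when no usable record exists, e.g. a retransmission that arrives after the
 * record was already acked.
 */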
static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
                             u32 tcp_seq, struct tx_sync_info *info)
{
        struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
        struct tls_record_info *record;
        int remaining, i = 0;
        unsigned long flags;
        bool ret = true;

        spin_lock_irqsave(&tx_ctx->lock, flags);
        record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);

        if (unlikely(!record)) {
                ret = false;
                goto out;
        }

        if (unlikely(tcp_seq < tls_record_start_seq(record))) {
                if (!tls_record_is_start_marker(record))
                        ret = false;
                goto out;
        }

        info->sync_len = tcp_seq - tls_record_start_seq(record);
        remaining = info->sync_len;
        while (remaining > 0) {
                skb_frag_t *frag = &record->frags[i];

                __skb_frag_ref(frag);
                remaining -= skb_frag_size(frag);
                info->frags[i++] = frag;
        }
        /* reduce the part which will be sent with the original SKB */
        if (remaining < 0)
                skb_frag_size_add(info->frags[i - 1], remaining);
        info->nr_frags = i;
out:
        spin_unlock_irqrestore(&tx_ctx->lock, flags);
        return ret;
}

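/* Re-post the connection parameters for a resync: if the record sequence
 * number changed, update it in the crypto info and post the static params
 * WQE as well (otherwise skip it), then post the parameter WQEs with the
 * first post fenced.
 */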
static void
tx_post_resync_params(struct mlx5e_txqsq *sq,
                      struct mlx5e_ktls_offload_context_tx *priv_tx,
                      u64 rcd_sn)
{
        struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
        struct tls12_crypto_info_aes_gcm_128 *info;
        __be64 rn_be = cpu_to_be64(rcd_sn);
        bool skip_static_post;
        u16 rec_seq_sz;
        char *rec_seq;

        if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
                return;

        info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
        rec_seq = info->rec_seq;
        rec_seq_sz = sizeof(info->rec_seq);

        skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
        if (!skip_static_post)
                memcpy(rec_seq, &rn_be, rec_seq_sz);

        mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}

struct mlx5e_dump_wqe {
        struct mlx5_wqe_ctrl_seg ctrl;
        struct mlx5_wqe_data_seg data;
};

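/* Post a DUMP WQE for a single frag of already-transmitted record data: map
 * the frag for DMA, point the data segment at it, and account for the WQE so
 * that completion handling can unmap the DMA and release the frag.
 */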
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                    skb_frag_t *frag, u32 tisn, bool first)
{
        struct mlx5_wqe_ctrl_seg *cseg;
        struct mlx5_wqe_data_seg *dseg;
        struct mlx5e_dump_wqe *wqe;
        dma_addr_t dma_addr = 0;
        u8  num_wqebbs;
        u16 ds_cnt;
        int fsz;
        u16 pi;

        wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);

        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
        num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);

        cseg = &wqe->ctrl;
        dseg = &wqe->data;

        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8)  | MLX5_OPCODE_DUMP);
        cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
        cseg->tisn             = cpu_to_be32(tisn << 8);
        cseg->fm_ce_se         = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;

        fsz = skb_frag_size(frag);
        dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
                                    DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                return -ENOMEM;

        dseg->addr       = cpu_to_be64(dma_addr);
        dseg->lkey       = sq->mkey_be;
        dseg->byte_count = cpu_to_be32(fsz);
        mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);

        tx_fill_wi(sq, pi, num_wqebbs, frag, fsz);
        sq->pc += num_wqebbs;

        WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
             "unexpected DUMP num_wqebbs, %d > %d",
             num_wqebbs, MLX5E_KTLS_MAX_DUMP_WQEBBS);

        return 0;
}

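/* Completion handler for a DUMP WQE: unmap its DMA, drop the reference taken
 * on the frag in tx_sync_info_get(), and update the dump statistics.
 */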
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
                                           struct mlx5e_tx_wqe_info *wi,
                                           struct mlx5e_sq_dma *dma)
{
        struct mlx5e_sq_stats *stats = sq->stats;

        mlx5e_tx_dma_unmap(sq->pdev, dma);
        __skb_frag_unref(wi->resync_dump_frag);
        stats->tls_dump_packets++;
        stats->tls_dump_bytes += wi->num_bytes;
}

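/* Post a fenced NOP WQE, used when a resync posts no DUMP WQEs but the data
 * WQE must still be fenced behind the parameter WQEs.
 */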
static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

        tx_fill_wi(sq, pi, 1, NULL, 0);

        mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}

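/* Handle a packet whose TCP sequence number does not match the expected one:
 * resync the device by re-posting the connection parameters for the record
 * that covers it and DUMPing the part of that record preceding the packet.
 * The skb is freed and NULL returned when resync is not possible.
 */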
static struct sk_buff *
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
                         struct mlx5e_txqsq *sq,
                         struct sk_buff *skb,
                         u32 seq)
{
        struct mlx5e_sq_stats *stats = sq->stats;
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct tx_sync_info info = {};
        u16 contig_wqebbs_room, pi;
        u8 num_wqebbs;
        int i;

        if (!tx_sync_info_get(priv_tx, seq, &info)) {
                /* We might get here if a retransmission reaches the driver
                 * after the relevant record is acked.
                 * It should be safe to drop the packet in this case
                 */
                stats->tls_drop_no_sync_data++;
                goto err_out;
        }

        if (unlikely(info.sync_len < 0)) {
                u32 payload;
                int headln;

                headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
                payload = skb->len - headln;
                if (likely(payload <= -info.sync_len))
                        return skb;

                stats->tls_drop_bypass_req++;
                goto err_out;
        }

        stats->tls_ooo++;

        num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS +
                (info.nr_frags ? info.nr_frags * MLX5E_KTLS_MAX_DUMP_WQEBBS : 1);
        pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
        if (unlikely(contig_wqebbs_room < num_wqebbs))
                mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);

        tx_post_resync_params(sq, priv_tx, info.rcd_sn);

        for (i = 0; i < info.nr_frags; i++)
                if (tx_post_resync_dump(sq, skb, info.frags[i],
                                        priv_tx->tisn, !i))
                        goto err_out;

        /* If no dump WQE was sent, we need to have a fence NOP WQE before the
         * actual data xmit.
         */
        if (!info.nr_frags)
                tx_post_fence_nop(sq);

        return skb;

err_out:
        dev_kfree_skb_any(skb);
        return NULL;
}

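/* TX offload entry point, called per skb: posts any pending parameter WQEs
 * for a newly added connection, resyncs on an unexpected sequence number, and
 * stamps the data WQE's control segment with the connection's TIS number.
 * Returns the skb to transmit, or NULL if it was dropped.
 */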
struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
                                         struct mlx5e_txqsq *sq,
                                         struct sk_buff *skb,
                                         struct mlx5e_tx_wqe **wqe, u16 *pi)
{
        struct mlx5e_ktls_offload_context_tx *priv_tx;
        struct mlx5e_sq_stats *stats = sq->stats;
        struct mlx5_wqe_ctrl_seg *cseg;
        struct tls_context *tls_ctx;
        int datalen;
        u32 seq;

        if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
                goto out;

        datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
        if (!datalen)
                goto out;

        tls_ctx = tls_get_ctx(skb->sk);
        if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
                goto err_out;

        priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);

        if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
                mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
                *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
                stats->tls_ctx++;
        }

        seq = ntohl(tcp_hdr(skb)->seq);
        if (unlikely(priv_tx->expected_seq != seq)) {
                skb = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, skb, seq);
                if (unlikely(!skb))
                        goto out;
                *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
        }

        priv_tx->expected_seq = seq + datalen;

        cseg = &(*wqe)->ctrl;
        cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);

        stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
        stats->tls_encrypted_bytes   += datalen;

out:
        return skb;

err_out:
        dev_kfree_skb_any(skb);
        return NULL;
}