/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_EN_TXRX_H___
#define __MLX5_EN_TXRX_H___

#include "en.h"

#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))

enum mlx5e_icosq_wqe_type {
	MLX5E_ICOSQ_WQE_NOP,
	MLX5E_ICOSQ_WQE_UMR_RX,
#ifdef CONFIG_MLX5_EN_TLS
	MLX5E_ICOSQ_WQE_UMR_TLS,
	MLX5E_ICOSQ_WQE_SET_PSV_TLS,
	MLX5E_ICOSQ_WQE_GET_PSV_TLS,
#endif
};

static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}

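/* Illustrative sketch, not part of the original header: producers typically
 * use this check to decide whether the SQ must be stopped before posting,
 * e.g.
 *
 *	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
 *					     sq->stop_room)))
 *		netif_tx_stop_queue(sq->txq);
 *
 * sq->stop_room and sq->txq are assumed field names here; the reservation
 * itself is computed with mlx5e_stop_room_for_wqe() below.
 */
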
static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size)
{
	void *wqe;

	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	memset(wqe, 0, wqe_size);

	return wqe;
}

#define MLX5E_TX_FETCH_WQE(sq, pi) \
	((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))

static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}

static inline struct mlx5e_tx_wqe *
mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);
	cseg->fm_ce_se         = MLX5_FENCE_MODE_INITIATOR_SMALL;

	(*pc)++;

	return wqe;
}

struct mlx5e_tx_wqe_info {
	struct sk_buff *skb;
	u32 num_bytes;
	u8 num_wqebbs;
	u8 num_dma;
#ifdef CONFIG_MLX5_EN_TLS
	struct page *resync_dump_frag_page;
#endif
};

static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_tx_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_tx_wqe_info) {
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}
		sq->stats->nop += contig_wqebbs;

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

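/* Illustrative sketch, not part of the original header: a typical caller
 * reserves the WQEBBs it needs and then fetches a zeroed WQE at the
 * returned index, e.g.
 *
 *	u16 pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
 *	struct mlx5e_tx_wqe *wqe = MLX5E_TX_FETCH_WQE(sq, pi);
 *
 * num_wqebbs is an assumed local here: the real callers derive it from the
 * size of the descriptor they are about to build, in units of
 * MLX5_SEND_WQE_BB.
 */
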
struct mlx5e_icosq_wqe_info {
	u8 wqe_type;
	u8 num_wqebbs;

	/* Auxiliary data for different wqe types. */
	union {
		struct {
			struct mlx5e_rq *rq;
		} umr;
#ifdef CONFIG_MLX5_EN_TLS
		struct {
			struct mlx5e_ktls_offload_context_rx *priv_rx;
		} tls_set_params;
		struct {
			struct mlx5e_ktls_rx_resync_buf *buf;
		} tls_get_params;
#endif
	};
};

void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq);

static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_icosq_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_icosq_wqe_info) {
				.wqe_type   = MLX5E_ICOSQ_WQE_NOP,
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

static inline void
mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,
			u16 pi, u16 nnops)
{
	struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];

	edge_wi = wi + nnops;

	/* fill sq frag edge with nops to avoid wqe wrapping two pages */
	for (; wi < edge_wi; wi++) {
		memset(wi, 0, sizeof(*wi));
		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
	}
	sq->stats->nop += nnops;
}

static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
		struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map);
}

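/* Illustrative sketch, not part of the original header: after building one
 * or more WQEs the producer bumps sq->pc and rings the doorbell with the
 * control segment of the last posted WQE, e.g.
 *
 *	struct mlx5e_tx_wqe *wqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
 *
 *	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
 *
 * sq->uar_map is assumed here to be the SQ's mapped UAR page, as used by
 * the callers in the TX path.
 */
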
static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg)
{
	return cseg && !!cseg->tis_tir_num;
}

static inline u8
mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
			 struct sk_buff *skb)
{
	u8 mode;

	if (mlx5e_transport_inline_tx_wqe(cseg))
		return MLX5_INLINE_MODE_TCP_UDP;

	mode = sq->min_inline_mode;

	if (skb_vlan_tag_present(skb) &&
	    test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
		mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);

	return mode;
}

static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

static inline struct mlx5e_sq_dma *
mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

static inline void
mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
	       enum mlx5e_dma_map_type map_type)
{
	struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);

	dma->addr = addr;
	dma->size = size;
	dma->type = map_type;
}

static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

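/* Illustrative sketch, not part of the original header: the TX path records
 * every mapping it creates so the completion path can undo it, e.g.
 *
 *	dma_addr_t dma_addr = dma_map_single(sq->pdev, skb->data, headlen,
 *					     DMA_TO_DEVICE);
 *	if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
 *		goto err_drop;
 *	mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
 *
 * and on completion, for each recorded entry:
 *
 *	mlx5e_tx_dma_unmap(sq->pdev, mlx5e_dma_get(sq, dma_fifo_cc++));
 *
 * sq->pdev, headlen and dma_fifo_cc are assumed names for the DMA device,
 * the linear-part length and the FIFO consumer counter.
 */
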
static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
{
	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		mlx5_wq_ll_reset(&rq->mpwqe.wq);
		rq->mpwqe.actual_wq_head = 0;
	} else {
		mlx5_wq_cyc_reset(&rq->wqe.wq);
	}
}

static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn,
					struct mlx5_err_cqe *err_cqe)
{
	struct mlx5_cqwq *wq = &cq->wq;
	u32 ci;

	ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);

	netdev_err(cq->channel->netdev,
		   "Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
		   cq->mcq.cqn, ci, qn,
		   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
		   err_cqe->syndrome, err_cqe->vendor_err_synd);
	mlx5_dump_err_cqe(cq->mdev, err_cqe);
}

static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_size(&rq->wqe.wq);
	}
}

static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return rq->mpwqe.wq.cur_sz;
	default:
		return rq->wqe.wq.cur_sz;
	}
}

static inline u16 mlx5e_rqwq_get_head(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_head(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_head(&rq->wqe.wq);
	}
}

static inline u16 mlx5e_rqwq_get_wqe_counter(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_counter(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_counter(&rq->wqe.wq);
	}
}

/* SW parser related functions */

struct mlx5e_swp_spec {
	__be16 l3_proto;
	u8 l4_proto;
	u8 is_tun;
	__be16 tun_l3_proto;
	u8 tun_l4_proto;
};

static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
		   struct mlx5e_swp_spec *swp_spec)
{
	/* SWP offsets are in 2-bytes words */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (swp_spec->l3_proto == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
	if (swp_spec->l4_proto) {
		eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
		if (swp_spec->l4_proto == IPPROTO_UDP)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
	}

	if (swp_spec->is_tun) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		if (swp_spec->l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	}
	switch (swp_spec->tun_l4_proto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		fallthrough;
	case IPPROTO_TCP:
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		break;
	}
}

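/* Illustrative sketch, not part of the original header: a caller describing
 * an IPv4-over-UDP tunnel carrying inner TCP might fill the spec as follows
 * before building the ethernet segment:
 *
 *	struct mlx5e_swp_spec swp_spec = {
 *		.l3_proto     = htons(ETH_P_IP),
 *		.l4_proto     = IPPROTO_UDP,
 *		.is_tun       = true,
 *		.tun_l3_proto = htons(ETH_P_IP),
 *		.tun_l4_proto = IPPROTO_TCP,
 *	};
 *
 *	mlx5e_set_eseg_swp(skb, &wqe->eth, &swp_spec);
 *
 * wqe->eth assumes a struct mlx5e_tx_wqe with an mlx5_wqe_eth_seg member
 * named "eth", as used elsewhere in the TX path.
 */
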
static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
{
	BUILD_BUG_ON(PAGE_SIZE / MLX5_SEND_WQE_BB < MLX5_SEND_WQE_MAX_WQEBBS);

	/* A WQE must not cross the page boundary, hence two conditions:
	 * 1. Its size must not exceed the page size.
	 * 2. If the WQE size is X, and the space remaining in a page is less
	 *    than X, this space needs to be padded with NOPs. So, one WQE of
	 *    size X may require up to X-1 WQEBBs of padding, which makes the
	 *    stop room of X-1 + X.
	 * WQE size is also limited by the hardware limit.
	 */

	if (__builtin_constant_p(wqe_size))
		BUILD_BUG_ON(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);
	else
		WARN_ON_ONCE(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);

	return wqe_size * 2 - 1;
}

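/* Worked example, not part of the original header: with the maximum WQE of
 * MLX5_SEND_WQE_MAX_WQEBBS == 16 basic blocks, up to 15 NOP WQEBBs of
 * padding may be needed at a page edge, so a caller reserves
 * mlx5e_stop_room_for_wqe(16) == 31 WQEBBs of stop room, e.g.
 *
 *	u16 stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
 *
 * where stop_room is a hypothetical local used only for illustration.
 */

#endif /* __MLX5_EN_TXRX_H___ */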