/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_EN_TXRX_H___
#define __MLX5_EN_TXRX_H___

#include "en.h"

#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))

enum mlx5e_icosq_wqe_type {
	MLX5E_ICOSQ_WQE_NOP,
	MLX5E_ICOSQ_WQE_UMR_RX,
};

static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}
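
/* How the room check works (explanatory note): pc - cc is the number of
 * WQEBBs in flight, so in a power-of-two ring of sz entries the free room
 * is sz - (pc - cc). Masking cc - pc through mlx5_wq_cyc_ctr2ix() computes
 * exactly that modulo sz, which is correct except when the ring is
 * completely empty (the masked result would be 0); the cc == pc clause
 * covers that case. For example, with sz = 8, cc = 5, pc = 7:
 * (5 - 7) & 7 = 6 free WQEBBs.
 */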

static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size)
{
	void *wqe;

	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	memset(wqe, 0, wqe_size);

	return wqe;
}

#define MLX5E_TX_FETCH_WQE(sq, pi) \
	((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))
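
/* Typical usage (illustrative sketch, not a complete transmit path):
 *
 *	pi  = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
 *	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
 *
 * i.e. reserve a contiguous run of WQEBBs first, then fetch the zeroed
 * WQE at that index and fill in its segments.
 */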

static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}
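
/* Field encoding (explanatory note): the low byte of opmod_idx_opcode is
 * the opcode and the next 16 bits carry the WQE index, hence
 * (*pc << 8) | MLX5_OPCODE_NOP. Similarly, qpn_ds keeps the SQ number in
 * the upper 24 bits and the data-segment count in the low byte; a NOP is
 * a single 16-byte ctrl segment, so the DS count is 0x01.
 */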

static inline struct mlx5e_tx_wqe *
mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);
	cseg->fm_ce_se         = MLX5_FENCE_MODE_INITIATOR_SMALL;

	(*pc)++;

	return wqe;
}
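
/* Note (assumption): MLX5_FENCE_MODE_INITIATOR_SMALL asks the device to
 * order this NOP after previously posted WQEs rather than letting it
 * execute out of order; the precise fence semantics are defined by the
 * device PRM.
 */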

struct mlx5e_tx_wqe_info {
	struct sk_buff *skb;
	u32 num_bytes;
	u8 num_wqebbs;
	u8 num_dma;
#ifdef CONFIG_MLX5_EN_TLS
	struct page *resync_dump_frag_page;
#endif
};

static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_tx_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_tx_wqe_info) {
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}
		sq->stats->nop += contig_wqebbs;

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}
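
/* Contract (explanatory note): the returned pi points at a run of at
 * least `size` contiguous WQEBBs, so the WQE built there cannot wrap
 * across a page-sized SQ fragment.
 */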

struct mlx5e_icosq_wqe_info {
	u8 wqe_type;
	u8 num_wqebbs;

	/* Auxiliary data for different wqe types. */
	union {
		struct {
			struct mlx5e_rq *rq;
		} umr;
	};
};

static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_icosq_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_icosq_wqe_info) {
				.wqe_type   = MLX5E_ICOSQ_WQE_NOP,
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

static inline void
mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,
			u16 pi, u16 nnops)
{
	struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];

	edge_wi = wi + nnops;

	/* fill sq frag edge with nops to avoid wqe wrapping two pages */
	for (; wi < edge_wi; wi++) {
		memset(wi, 0, sizeof(*wi));
		wi->num_wqebbs = 1;
		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
	}
	sq->stats->nop += nnops;
}
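
/* Note: this is the open-coded counterpart of the edge fill inside
 * mlx5e_txqsq_get_next_pi(); callers that have already computed the number
 * of contiguous WQEBBs remaining in the fragment pass it here as nnops.
 */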

static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
		struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map);
}
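
/* Barrier choice (explanatory note): dma_wmb() orders the WQE stores
 * against the doorbell-record store, both of which target coherent DMA
 * memory. The heavier wmb() is needed before mlx5_write64() because that
 * last write goes to MMIO (the UAR page), and ordering plain memory
 * stores against an MMIO store requires a full write barrier.
 */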

static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg)
{
	return cseg && !!cseg->tisn;
}

static inline u8
mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
			 struct sk_buff *skb)
{
	u8 mode;

	if (mlx5e_transport_inline_tx_wqe(cseg))
		return MLX5_INLINE_MODE_TCP_UDP;

	mode = sq->min_inline_mode;

	if (skb_vlan_tag_present(skb) &&
	    test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
		mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);

	return mode;
}
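
/* Rationale (explanatory note): when the VLAN tag must be inserted in
 * software (MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE), the L2 header has to be
 * inlined into the WQE so the tag actually reaches the wire; hence the
 * mode is raised to at least MLX5_INLINE_MODE_L2.
 */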

static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

static inline struct mlx5e_sq_dma *
mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

static inline void
mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
	       enum mlx5e_dma_map_type map_type)
{
	struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);

	dma->addr = addr;
	dma->size = size;
	dma->type = map_type;
}
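
/* Pairing (explanatory note): each mlx5e_dma_push() on the xmit path has
 * a matching mlx5e_tx_dma_unmap() on the completion path, which walks the
 * same FIFO through mlx5e_dma_get() with a consumer counter.
 */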

static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
{
	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		mlx5_wq_ll_reset(&rq->mpwqe.wq);
		rq->mpwqe.actual_wq_head = 0;
	} else {
		mlx5_wq_cyc_reset(&rq->wqe.wq);
	}
}
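
/* Note: striding (MPWQE) RQs are built on a linked-list work queue, hence
 * the mlx5_wq_ll_reset() path, while legacy RQs use a cyclic work queue
 * reset via mlx5_wq_cyc_reset().
 */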

static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 sqn,
					struct mlx5_err_cqe *err_cqe)
{
	struct mlx5_cqwq *wq = &cq->wq;
	u32 ci;

	ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);

	netdev_err(cq->channel->netdev,
		   "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
		   cq->mcq.cqn, ci, sqn,
		   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
		   err_cqe->syndrome, err_cqe->vendor_err_synd);
	mlx5_dump_err_cqe(cq->mdev, err_cqe);
}

/* SW parser related functions */

struct mlx5e_swp_spec {
	__be16 l3_proto;
	u8 l4_proto;
	u8 is_tun;
	__be16 tun_l3_proto;
	u8 tun_l4_proto;
};

static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
		   struct mlx5e_swp_spec *swp_spec)
{
	/* SWP offsets are in 2-byte words */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (swp_spec->l3_proto == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
	if (swp_spec->l4_proto) {
		eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
		if (swp_spec->l4_proto == IPPROTO_UDP)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
	}

	if (swp_spec->is_tun) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		if (swp_spec->l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	}
	switch (swp_spec->tun_l4_proto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		/* fall through */
	case IPPROTO_TCP:
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		break;
	}
}
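
/* Illustrative sketch (hypothetical caller): for a UDP tunnel over IPv4
 * carrying an inner IPv6/TCP flow, the spec would be filled roughly like
 * this before building the WQE:
 *
 *	struct mlx5e_swp_spec swp_spec = {
 *		.l3_proto     = htons(ETH_P_IP),
 *		.l4_proto     = IPPROTO_UDP,
 *		.is_tun       = true,
 *		.tun_l3_proto = htons(ETH_P_IPV6),
 *		.tun_l4_proto = IPPROTO_TCP,
 *	};
 *	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
 */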

static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
{
	BUILD_BUG_ON(PAGE_SIZE / MLX5_SEND_WQE_BB < MLX5_SEND_WQE_MAX_WQEBBS);

	/* A WQE must not cross the page boundary, hence two conditions:
	 * 1. Its size must not exceed the page size.
	 * 2. If the WQE size is X, and the space remaining in a page is less
	 *    than X, this space needs to be padded with NOPs. So, one WQE of
	 *    size X may require up to X-1 WQEBBs of padding, which makes the
	 *    stop room of X-1 + X.
	 * WQE size is also limited by the hardware limit.
	 */

	if (__builtin_constant_p(wqe_size))
		BUILD_BUG_ON(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);
	else
		WARN_ON_ONCE(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);
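
	/* Worked example (explanatory note): for wqe_size = 4 WQEBBs, up to
	 * 3 WQEBBs of NOP padding may be posted before the WQE itself, so
	 * the stop room is 4 * 2 - 1 = 7 WQEBBs.
	 */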

	return wqe_size * 2 - 1;
}

#endif