/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_EN_TXRX_H___
#define __MLX5_EN_TXRX_H___

#include "en.h"
#define MLX5E_SQ_NOPS_ROOM (MLX5_SEND_WQE_MAX_WQEBBS - 1)
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
			    MLX5E_SQ_NOPS_ROOM)
#ifndef CONFIG_MLX5_EN_TLS
#define MLX5E_SQ_TLS_ROOM (0)
#else
/* TLS offload requires additional stop_room for:
 * - a resync SKB.
 * kTLS offload requires fixed additional stop_room for:
 * - a static params WQE, and a progress params WQE.
 * The additional MTU-dependent room for the resync DUMP WQEs
 * is calculated and added at runtime.
 */
#define MLX5E_SQ_TLS_ROOM  \
	(MLX5_SEND_WQE_MAX_WQEBBS + \
	 MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS)
#endif
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
/* True iff at least @n WQEBBs are free between the consumer (cc) and
 * producer (pc) counters; cc == pc means the queue is empty.
 */
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}
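/* Usage sketch (illustrative, not part of this header): the TX datapath
 * stops the queue when free room drops below the stop room. This mirrors
 * the pattern in en_tx.c; the exact stats/queue fields are assumptions.
 *
 *	if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc,
 *					     MLX5E_SQ_STOP_ROOM))) {
 *		netif_tx_stop_queue(sq->txq);
 *		sq->stats->stopped++;
 *	}
 */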
static inline void *
mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq, size_t size, u16 *pi)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	void *wqe;

	*pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
	memset(wqe, 0, size);

	return wqe;
}
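/* Usage sketch (illustrative): fetch and zero one TX WQE at the current
 * producer position, as the xmit path does:
 *
 *	struct mlx5e_tx_wqe *wqe;
 *	u16 pi;
 *
 *	wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
 */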
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}
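/* Note on the qpn_ds encoding above: the high 24 bits carry the SQ number
 * and the low bits carry the WQE's data-segment count in 16-byte units; a
 * NOP consists of only its 16-byte ctrl segment, hence the count of 0x01.
 */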
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);
	cseg->fm_ce_se         = MLX5_FENCE_MODE_INITIATOR_SMALL;

	(*pc)++;

	return wqe;
}
static inline void
mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,
			u16 pi, u16 nnops)
{
	struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];

	edge_wi = wi + nnops;

	/* fill sq frag edge with nops to avoid wqe wrapping two pages */
	for (; wi < edge_wi; wi++) {
		memset(wi, 0, sizeof(*wi));
		wi->num_wqebbs = 1;
		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
	}
	sq->stats->nop += nnops;
}
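/* Usage sketch (illustrative; mirrors callers in the TX path): before
 * posting a multi-WQEBB WQE, pad up to the fragment edge if the WQE would
 * not fit contiguously:
 *
 *	u16 contig = mlx5_wq_cyc_get_contig_wqebbs_left(wq, pi);
 *
 *	if (unlikely(contig < num_wqebbs))
 *		mlx5e_fill_sq_frag_edge(sq, wq, pi, contig);
 */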
static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
		struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map);
}
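/* Usage sketch (illustrative): the xmit path batches doorbells, ringing
 * only for the last packet of a TX batch:
 *
 *	if (!netdev_xmit_more() || netif_xmit_stopped(sq->txq))
 *		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
 */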
static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg)
{
	return cseg && !!cseg->tisn;
}
static inline u8
mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
			 struct sk_buff *skb)
{
	u8 mode;

	if (mlx5e_transport_inline_tx_wqe(cseg))
		return MLX5_INLINE_MODE_TCP_UDP;

	mode = sq->min_inline_mode;

	if (skb_vlan_tag_present(skb) &&
	    test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
		mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);

	return mode;
}
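/* Note (an assumption inferred from the flag name): when the device cannot
 * insert the VLAN tag itself, at least the L2 headers must be inlined into
 * the WQE so a correctly tagged frame can be built.
 */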
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}
static inline struct mlx5e_sq_dma *
mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}
static inline void
mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
	       enum mlx5e_dma_map_type map_type)
{
	struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);

	dma->addr = addr;
	dma->size = size;
	dma->type = map_type;
}
static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}
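/* Usage sketch (illustrative; mirrors the driver's error unwind): pop the
 * most recently pushed mappings off the FIFO and unmap them:
 *
 *	while (num_dma--) {
 *		struct mlx5e_sq_dma *last_pushed_dma =
 *			mlx5e_dma_get(sq, --sq->dma_fifo_pc);
 *
 *		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
 *	}
 */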
/* SW parser related functions */
struct mlx5e_swp_spec {
	__be16 l3_proto;
	u8 l4_proto;
	u8 is_tun;
	__be16 tun_l3_proto;
	u8 tun_l4_proto;
};
static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
		   struct mlx5e_swp_spec *swp_spec)
{
	/* SWP offsets are in 2-byte words */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (swp_spec->l3_proto == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
	if (swp_spec->l4_proto) {
		eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
		if (swp_spec->l4_proto == IPPROTO_UDP)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
	}
	if (swp_spec->is_tun) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		if (swp_spec->l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	}
	switch (swp_spec->tun_l4_proto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		/* fall through */
	case IPPROTO_TCP:
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		break;
	}
}
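/* Usage sketch (illustrative; the chosen field values are assumptions for
 * a UDP tunnel over inner IPv4, not taken from a specific caller):
 *
 *	struct mlx5e_swp_spec swp_spec = {};
 *
 *	swp_spec.l3_proto     = skb->protocol;
 *	swp_spec.l4_proto     = IPPROTO_UDP;
 *	swp_spec.is_tun       = true;
 *	swp_spec.tun_l3_proto = htons(ETH_P_IP);
 *	swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
 *
 *	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
 */

#endif /* __MLX5_EN_TXRX_H___ */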