/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/ptp_classify.h>
#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
#include "en/txrx.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "lib/clock.h"
#include "en/ptp.h"
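
/* TX datapath: a send WQE starts with a control segment and an Ethernet
 * segment, followed by optional inline headers and gather (data)
 * segments. Sizes are counted in 16-byte data segments (DS), packed into
 * 64-byte basic blocks (WQEBBs). The helpers below size and build WQEs
 * on the SQ ring; the CQ polling at the end of the file unmaps DMA and
 * completes the skbs.
 */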
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *last_pushed_dma =
			mlx5e_dma_get(sq, --sq->dma_fifo_pc);

		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
	}
}
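
/* DSCP is the upper six bits of the IP DS field (hence the ">> 2");
 * the code point indexes the dscp2prio table that DCBNL programs when
 * the port trust state is DSCP.
 */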
#ifdef CONFIG_MLX5_CORE_EN_DCB
static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
{
	int dscp_cp = 0;

	if (skb->protocol == htons(ETH_P_IP))
		dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
	else if (skb->protocol == htons(ETH_P_IPV6))
		dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

	return priv->dcbx_dp.dscp2prio[dscp_cp];
}
#endif
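
/* PTP event packets are either L2 (EtherType ETH_P_1588) or UDP with
 * destination port PTP_EV_PORT (319). Only those are steered to the
 * dedicated PTP SQ, which is set up for more accurate transmit
 * timestamping.
 */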
static bool mlx5e_use_ptpsq(struct sk_buff *skb)
{
	struct flow_keys fk;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return false;

	if (fk.basic.n_proto == htons(ETH_P_1588))
		return true;

	if (fk.basic.n_proto != htons(ETH_P_IP) &&
	    fk.basic.n_proto != htons(ETH_P_IPV6))
		return false;

	return (fk.basic.ip_proto == IPPROTO_UDP &&
		fk.ports.dst == htons(PTP_EV_PORT));
}
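
/* Resolve the user priority the same way as the regular path (DSCP
 * trust first, then VLAN PCP) and map it to the PTP channel's real txq.
 */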
static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int up = 0;

	if (!netdev_get_num_tc(dev))
		goto return_txq;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
		up = mlx5e_get_dscp_up(priv, skb);
	else
#endif
		if (skb_vlan_tag_present(skb))
			up = skb_vlan_tag_get_prio(skb);

return_txq:
	return priv->port_ptp_tc2realtxq[up];
}
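
/* HTB offload: the classid comes from skb->priority when its major
 * number matches the offloaded root qdisc, otherwise from the default
 * class. A non-positive return means no HTB queue applies and the
 * caller falls through to regular selection.
 */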
static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
				  u16 htb_maj_id)
{
	u16 classid;

	if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id)
		classid = TC_H_MIN(skb->priority);
	else
		classid = READ_ONCE(priv->htb.defcls);

	if (!classid)
		return 0;

	return mlx5e_get_txq_by_classid(priv, classid);
}
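
/* ndo_select_queue: when special queues exist (HTB or PTP, making
 * real_num_tx_queues exceed num_tc x num_ch), try them first; otherwise
 * let netdev_pick_tx() pick a channel and remap the result to the real
 * txq of that channel and the packet's user priority.
 */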
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int num_tc_x_num_ch;
	int txq_ix;
	int up = 0;
	int ch_ix;

	/* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
	num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
	if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) {
		struct mlx5e_ptp *ptp_channel;

		/* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
		u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id);

		if (unlikely(htb_maj_id)) {
			txq_ix = mlx5e_select_htb_queue(priv, skb, htb_maj_id);
			if (txq_ix > 0)
				return txq_ix;
		}

		ptp_channel = READ_ONCE(priv->channels.ptp);
		if (unlikely(ptp_channel) &&
		    test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
		    mlx5e_use_ptpsq(skb))
			return mlx5e_select_ptpsq(dev, skb);

		txq_ix = netdev_pick_tx(dev, skb, NULL);
		/* netdev_pick_tx() is not aware of the ptp_channel and HTB
		 * txqs; if it picks one, remap to a regular queue. Those
		 * queues are selected only in mlx5e_select_ptpsq() and
		 * mlx5e_select_htb_queue().
		 */
		if (unlikely(txq_ix >= num_tc_x_num_ch))
			txq_ix %= num_tc_x_num_ch;
	} else {
		txq_ix = netdev_pick_tx(dev, skb, NULL);
	}

	if (!netdev_get_num_tc(dev))
		return txq_ix;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
		up = mlx5e_get_dscp_up(priv, skb);
	else
#endif
		if (skb_vlan_tag_present(skb))
			up = skb_vlan_tag_get_prio(skb);

	/* Normalize any picked txq_ix to [0, num_channels), so we can
	 * return a txq_ix that matches the channel and packet UP.
	 */
	ch_ix = priv->txq2sq[txq_ix]->ch_ix;

	return priv->channel_tc2realtxq[ch_ix][up];
}
static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}
static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
	if (skb_transport_header_was_set(skb))
		return skb_transport_offset(skb);
	else
		return mlx5e_skb_l2_header_offset(skb);
}
static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
					struct sk_buff *skb)
{
	u16 hlen;

	switch (mode) {
	case MLX5_INLINE_MODE_NONE:
		return 0;
	case MLX5_INLINE_MODE_TCP_UDP:
		hlen = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
			hlen += VLAN_HLEN;
		break;
	case MLX5_INLINE_MODE_IP:
		hlen = mlx5e_skb_l3_header_offset(skb);
		break;
	case MLX5_INLINE_MODE_L2:
	default:
		hlen = mlx5e_skb_l2_header_offset(skb);
	}
	return min_t(u16, hlen, skb_headlen(skb));
}
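
/* Software VLAN insertion into the inline header: copy the two MAC
 * addresses, splice in the 4-byte 802.1Q tag, then copy the remaining
 * ihs - 2 * ETH_ALEN header bytes.
 */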
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	memcpy(vhdr, skb->data, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}
/* If a packet is not IP's CHECKSUM_PARTIAL (e.g. an ICMP packet),
 * we still need to set the L3 checksum flag for IPsec.
 */
static void
ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			    struct mlx5_wqe_eth_seg *eseg)
{
	eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
	if (skb->encapsulation) {
		eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
		sq->stats->csum_partial_inner++;
	} else {
		sq->stats->csum_partial++;
	}
}
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			    struct mlx5e_accel_tx_state *accel,
			    struct mlx5_wqe_eth_seg *eseg)
{
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats->csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
			sq->stats->csum_partial++;
		}
#ifdef CONFIG_MLX5_EN_TLS
	} else if (unlikely(accel && accel->tls.tls_tisn)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
		sq->stats->csum_partial++;
#endif
	} else if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
		ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
	} else
		sq->stats->csum_none++;
}
static inline u16
mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	u16 ihs;

	if (skb->encapsulation) {
		ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
		stats->tso_inner_packets++;
		stats->tso_inner_bytes += skb->len - ihs;
	} else {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
			ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
		else
			ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
		stats->tso_packets++;
		stats->tso_bytes += skb->len - ihs;
	}

	return ihs;
}
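
/* DMA-map the linear part and every page fragment, filling one gather
 * DS per mapping. Returns the number of mappings pushed to the DMA
 * fifo, or -ENOMEM after unwinding partial mappings.
 */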
static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			unsigned char *skb_data, u16 headlen,
			struct mlx5_wqe_data_seg *dseg)
{
	dma_addr_t dma_addr = 0;
	u8 num_dma = 0;
	int i;

	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		num_dma++;
		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		num_dma++;
		dseg++;
	}

	return num_dma;

dma_unmap_wqe_err:
	mlx5e_dma_unmap_wqe_err(sq, num_dma);
	return -ENOMEM;
}
struct mlx5e_tx_attr {
	__be16 mss;
	u32 num_bytes;
	u16 headlen;
	u16 ihs;
	u16 insz;
	u8 opcode;
};

struct mlx5e_tx_wqe_attr {
	u16 ds_cnt;
	u16 ds_cnt_inl;
	u16 ds_cnt_ids;
	u8 num_wqebbs;
};
static u8
mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			 struct mlx5e_accel_tx_state *accel)
{
	u8 mode;

#ifdef CONFIG_MLX5_EN_TLS
	if (accel && accel->tls.tls_tisn)
		return MLX5_INLINE_MODE_TCP_UDP;
#endif

	mode = sq->min_inline_mode;

	if (skb_vlan_tag_present(skb) &&
	    test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
		mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);

	return mode;
}
static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
				  struct mlx5e_accel_tx_state *accel,
				  struct mlx5e_tx_attr *attr)
{
	struct mlx5e_sq_stats *stats = sq->stats;

	if (skb_is_gso(skb)) {
		u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb);

		*attr = (struct mlx5e_tx_attr) {
			.opcode    = MLX5_OPCODE_LSO,
			.mss       = cpu_to_be16(skb_shinfo(skb)->gso_size),
			.ihs       = ihs,
			.num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs,
			.headlen   = skb_headlen(skb) - ihs,
		};

		stats->packets += skb_shinfo(skb)->gso_segs;
	} else {
		u8 mode = mlx5e_tx_wqe_inline_mode(sq, skb, accel);
		u16 ihs = mlx5e_calc_min_inline(mode, skb);

		*attr = (struct mlx5e_tx_attr) {
			.opcode    = MLX5_OPCODE_SEND,
			.mss       = cpu_to_be16(0),
			.ihs       = ihs,
			.num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN),
			.headlen   = skb_headlen(skb) - ihs,
		};

		stats->packets++;
	}

	attr->insz = mlx5e_accel_tx_ids_len(sq, accel);
	stats->bytes += attr->num_bytes;
}
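
/* DS accounting example (assuming no VLAN tag): 54 bytes of inline
 * headers (ETH + IPv4 + TCP) cost DIV_ROUND_UP(54 - INL_HDR_START_SZ, 16)
 * = 4 inline DS; a nonzero headlen plus two frags add 3 gather DS on top
 * of MLX5E_TX_WQE_EMPTY_DS_COUNT. With 4 DS per 64-byte WQEBB, the WQE
 * occupies DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS) basic blocks.
 */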
static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_attr *attr,
				   struct mlx5e_tx_wqe_attr *wqe_attr)
{
	u16 ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT;
	u16 ds_cnt_inl = 0;
	u16 ds_cnt_ids = 0;

	if (attr->insz)
		ds_cnt_ids = DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + attr->insz,
					  MLX5_SEND_WQE_DS);

	ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags + ds_cnt_ids;
	if (attr->ihs) {
		u16 inl = attr->ihs - INL_HDR_START_SZ;

		if (skb_vlan_tag_present(skb))
			inl += VLAN_HLEN;

		ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
		ds_cnt += ds_cnt_inl;
	}

	*wqe_attr = (struct mlx5e_tx_wqe_attr) {
		.ds_cnt     = ds_cnt,
		.ds_cnt_inl = ds_cnt_inl,
		.ds_cnt_ids = ds_cnt_ids,
		.num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
	};
}
static void mlx5e_tx_skb_update_hwts_flags(struct sk_buff *skb)
{
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}
static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
{
	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats->stopped++;
	}
}
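
/* Finish a WQE: fill the bookkeeping entry and the control segment
 * (producer counter + opcode, SQN + DS count), advance the producer,
 * and ring the doorbell unless xmit_more batching defers it.
 */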
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		     const struct mlx5e_tx_attr *attr,
		     const struct mlx5e_tx_wqe_attr *wqe_attr, u8 num_dma,
		     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
		     bool xmit_more)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	bool send_doorbell;

	*wi = (struct mlx5e_tx_wqe_info) {
		.skb = skb,
		.num_bytes = attr->num_bytes,
		.num_dma = num_dma,
		.num_wqebbs = wqe_attr->num_wqebbs,
		.num_fifo_pkts = 0,
	};

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | attr->opcode);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | wqe_attr->ds_cnt);

	mlx5e_tx_skb_update_hwts_flags(skb);

	sq->pc += wi->num_wqebbs;

	mlx5e_tx_check_stop(sq);

	if (unlikely(sq->ptpsq)) {
		mlx5e_skb_cb_hwtstamp_init(skb);
		mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb);
		skb_get(skb);
	}

	send_doorbell = __netdev_tx_sent_queue(sq->txq, attr->num_bytes, xmit_more);
	if (send_doorbell)
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}
static void
mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		  const struct mlx5e_tx_attr *attr, const struct mlx5e_tx_wqe_attr *wqe_attr,
		  struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg  *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;

	struct mlx5e_sq_stats *stats = sq->stats;
	int num_dma;

	stats->xmit_more += xmit_more;

	/* fill wqe */
	wi   = &sq->db.wqe_info[pi];
	cseg = &wqe->ctrl;
	eseg = &wqe->eth;
	dseg =  wqe->data;

	eseg->mss = attr->mss;

	if (attr->ihs) {
		if (skb_vlan_tag_present(skb)) {
			eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs + VLAN_HLEN);
			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, attr->ihs);
			stats->added_vlan_packets++;
		} else {
			eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs);
			memcpy(eseg->inline_hdr.start, skb->data, attr->ihs);
		}
		dseg += wqe_attr->ds_cnt_inl;
	} else if (skb_vlan_tag_present(skb)) {
		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
		if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
			eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
		stats->added_vlan_packets++;
	}

	dseg += wqe_attr->ds_cnt_ids;
	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs,
					  attr->headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, xmit_more);

	return;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);
}
static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr)
{
	return !skb_is_nonlinear(skb) && !skb_vlan_tag_present(skb) && !attr->ihs &&
	       !attr->insz;
}
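
/* Enhanced MPWQE mode: consecutive packets that share an identical
 * Ethernet segment are aggregated into one multi-packet WQE, one gather
 * DS per packet, completed by a single control segment. The "session"
 * is the MPWQE currently being filled; it is closed when the eseg
 * changes, the WQE fills up, or a doorbell has to be rung.
 */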
static bool mlx5e_tx_mpwqe_same_eseg(struct mlx5e_txqsq *sq, struct mlx5_wqe_eth_seg *eseg)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;

	/* Assumes the session is already running and has at least one packet. */
	return !memcmp(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);
}
static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
					 struct mlx5_wqe_eth_seg *eseg)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
	net_prefetchw(wqe->data);

	*session = (struct mlx5e_tx_mpwqe) {
		.wqe = wqe,
		.bytes_count = 0,
		.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
		.pkt_count = 0,
		.inline_on = 0,
	};

	memcpy(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);

	sq->stats->mpwqe_blks++;
}
static bool mlx5e_tx_mpwqe_session_is_active(struct mlx5e_txqsq *sq)
{
	return sq->mpwqe.wqe;
}
static void mlx5e_tx_mpwqe_add_dseg(struct mlx5e_txqsq *sq, struct mlx5e_xmit_data *txd)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5_wqe_data_seg *dseg;

	dseg = (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;

	session->pkt_count++;
	session->bytes_count += txd->len;

	dseg->addr = cpu_to_be64(txd->dma_addr);
	dseg->byte_count = cpu_to_be32(txd->len);
	dseg->lkey = sq->mkey_be;
	session->ds_count++;

	sq->stats->mpwqe_pkts++;
}
static struct mlx5_wqe_ctrl_seg *mlx5e_tx_mpwqe_session_complete(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	u8 ds_count = session->ds_count;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_tx_wqe_info *wi;
	u16 pi;

	cseg = &session->wqe->ctrl;
	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);

	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wi = &sq->db.wqe_info[pi];
	*wi = (struct mlx5e_tx_wqe_info) {
		.skb = NULL,
		.num_bytes = session->bytes_count,
		.num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS),
		.num_dma = session->pkt_count,
		.num_fifo_pkts = session->pkt_count,
	};

	sq->pc += wi->num_wqebbs;

	session->wqe = NULL;

	mlx5e_tx_check_stop(sq);

	return cseg;
}
static void
mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		    struct mlx5_wqe_eth_seg *eseg, bool xmit_more)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_xmit_data txd;

	if (!mlx5e_tx_mpwqe_session_is_active(sq)) {
		mlx5e_tx_mpwqe_session_start(sq, eseg);
	} else if (!mlx5e_tx_mpwqe_same_eseg(sq, eseg)) {
		mlx5e_tx_mpwqe_session_complete(sq);
		mlx5e_tx_mpwqe_session_start(sq, eseg);
	}

	sq->stats->xmit_more += xmit_more;

	txd.data = skb->data;
	txd.len = skb->len;

	txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
		goto err_unmap;
	mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);

	mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);

	mlx5e_tx_mpwqe_add_dseg(sq, &txd);

	mlx5e_tx_skb_update_hwts_flags(skb);

	if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) {
		/* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
		cseg = mlx5e_tx_mpwqe_session_complete(sq);

		if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more))
			mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	} else if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more)) {
		/* Might stop the queue, but we were asked to ring the doorbell anyway. */
		cseg = mlx5e_tx_mpwqe_session_complete(sq);

		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	}

	return;

err_unmap:
	mlx5e_dma_unmap_wqe_err(sq, 1);
	sq->stats->dropped++;
	dev_kfree_skb_any(skb);
}
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
{
	/* Unlikely in non-MPWQE workloads; not important in MPWQE workloads. */
	if (unlikely(mlx5e_tx_mpwqe_session_is_active(sq)))
		mlx5e_tx_mpwqe_session_complete(sq);
}
static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
				   struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
				   struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
	if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg, ihs)))
		return false;

	mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);

	return true;
}
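
/* Main transmit entry point (ndo_start_xmit): run the accel (TLS/IPsec)
 * begin hooks, compute the send attributes, then take either the MPWQE
 * aggregation path or the regular single-WQE path.
 */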
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_accel_tx_state accel = {};
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5e_tx_wqe *wqe;
	struct mlx5e_txqsq *sq;
	u16 pi;

	sq = priv->txq2sq[skb_get_queue_mapping(skb)];
	if (unlikely(!sq)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* May send SKBs and WQEs. */
	if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
		return NETDEV_TX_OK;

	mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr);

	if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state)) {
		if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
			struct mlx5_wqe_eth_seg eseg = {};

			if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg,
							     attr.ihs)))
				return NETDEV_TX_OK;

			mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
			return NETDEV_TX_OK;
		}

		mlx5e_tx_mpwqe_ensure_complete(sq);
	}

	mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);

	/* May update the WQE, but may not post other WQEs. */
	mlx5e_accel_tx_finish(sq, wqe, &accel,
			      (struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
	if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs)))
		return NETDEV_TX_OK;

	mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());

	return NETDEV_TX_OK;
}
void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more)
{
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
	mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
	mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, &wqe->eth);
	mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, xmit_more);
}
static void mlx5e_tx_wi_dma_unmap(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
				  u32 *dma_fifo_cc)
{
	int i;

	for (i = 0; i < wi->num_dma; i++) {
		struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);

		mlx5e_tx_dma_unmap(sq->pdev, dma);
	}
}
static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			      struct mlx5_cqe64 *cqe, int napi_budget)
{
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct skb_shared_hwtstamps hwts = {};
		u64 ts = get_cqe_ts(cqe);

		hwts.hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, ts);
		if (sq->ptpsq)
			mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
						      hwts.hwtstamp, sq->ptpsq->cq_stats);
		else
			skb_tstamp_tx(skb, &hwts);
	}

	napi_consume_skb(skb, napi_budget);
}
static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
					  struct mlx5_cqe64 *cqe, int napi_budget)
{
	int i;

	for (i = 0; i < wi->num_fifo_pkts; i++) {
		struct sk_buff *skb = mlx5e_skb_fifo_pop(&sq->db.skb_fifo);

		mlx5e_consume_skb(sq, skb, cqe, napi_budget);
	}
}
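
/* NAPI TX completion. A single CQE completes all WQEs up to and
 * including its wqe_counter: regular WQEs carry their skb in wqe_info,
 * while MPWQE completions pop num_fifo_pkts skbs from the skb fifo.
 * Returns true if the poll budget was exhausted (more work may remain).
 */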
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_txqsq *sq;
	struct mlx5_cqe64 *cqe;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_txqsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	stats = sq->stats;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	i = 0;
	do {
		struct mlx5e_tx_wqe_info *wi;
		u16 wqe_counter;
		bool last_wqe;
		u16 ci;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			last_wqe = (sqcc == wqe_counter);

			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			wi = &sq->db.wqe_info[ci];

			sqcc += wi->num_wqebbs;

			if (likely(wi->skb)) {
				mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
				mlx5e_consume_skb(sq, wi->skb, cqe, napi_budget);

				npkts++;
				nbytes += wi->num_bytes;
				continue;
			}

			if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi,
									       &dma_fifo_cc)))
				continue;

			if (wi->num_fifo_pkts) {
				mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
				mlx5e_tx_wi_consume_fifo_skbs(sq, wi, cqe, napi_budget);

				npkts += wi->num_fifo_pkts;
				nbytes += wi->num_bytes;
			}
		} while (!last_wqe);

		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
					      &sq->state)) {
				mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
						     (struct mlx5_err_cqe *)cqe);
				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
				queue_work(cq->priv->wq, &sq->recover_work);
			}
			stats->cqe_err++;
		}

	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	stats->cqes += i;

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
		netif_tx_wake_queue(sq->txq);
		stats->wake++;
	}

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
static void mlx5e_tx_wi_kfree_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi)
{
	int i;

	for (i = 0; i < wi->num_fifo_pkts; i++)
		dev_kfree_skb_any(mlx5e_skb_fifo_pop(&sq->db.skb_fifo));
}
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	u32 dma_fifo_cc, nbytes = 0;
	u16 ci, sqcc, npkts = 0;

	sqcc = sq->cc;
	dma_fifo_cc = sq->dma_fifo_cc;

	while (sqcc != sq->pc) {
		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
		wi = &sq->db.wqe_info[ci];

		sqcc += wi->num_wqebbs;

		if (likely(wi->skb)) {
			mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
			dev_kfree_skb_any(wi->skb);

			npkts++;
			nbytes += wi->num_bytes;
			continue;
		}

		if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi, &dma_fifo_cc)))
			continue;

		if (wi->num_fifo_pkts) {
			mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
			mlx5e_tx_wi_kfree_fifo_skbs(sq, wi);

			npkts += wi->num_fifo_pkts;
			nbytes += wi->num_bytes;
		}
	}

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);
}
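
/* IPoIB TX path: same attribute and DS accounting as the Ethernet path,
 * but the WQE also carries a datagram segment with the address vector
 * (destination QPN and Q_Key) ahead of the Ethernet segment.
 */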
#ifdef CONFIG_MLX5_CORE_IPOIB
static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
			   struct mlx5_wqe_datagram_seg *dseg)
{
	memcpy(&dseg->av, av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}

static void mlx5i_sq_calc_wqe_attr(struct sk_buff *skb,
				   const struct mlx5e_tx_attr *attr,
				   struct mlx5e_tx_wqe_attr *wqe_attr)
{
	u16 ds_cnt = sizeof(struct mlx5i_tx_wqe) / MLX5_SEND_WQE_DS;
	u16 ds_cnt_inl = 0;

	ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags;

	if (attr->ihs) {
		u16 inl = attr->ihs - INL_HDR_START_SZ;

		ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
		ds_cnt += ds_cnt_inl;
	}

	*wqe_attr = (struct mlx5e_tx_wqe_attr) {
		.ds_cnt     = ds_cnt,
		.ds_cnt_inl = ds_cnt_inl,
		.num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
	};
}

void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		   struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more)
{
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5i_tx_wqe *wqe;

	struct mlx5_wqe_datagram_seg *datagram;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg  *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;

	struct mlx5e_sq_stats *stats = sq->stats;
	int num_dma;
	u16 pi;

	mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
	mlx5i_sq_calc_wqe_attr(skb, &attr, &wqe_attr);

	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5I_SQ_FETCH_WQE(sq, pi);

	stats->xmit_more += xmit_more;

	/* fill wqe */
	wi       = &sq->db.wqe_info[pi];
	cseg     = &wqe->ctrl;
	datagram = &wqe->datagram;
	eseg     = &wqe->eth;
	dseg     =  wqe->data;

	mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

	mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);

	eseg->mss = attr.mss;

	if (attr.ihs) {
		memcpy(eseg->inline_hdr.start, skb->data, attr.ihs);
		eseg->inline_hdr.sz = cpu_to_be16(attr.ihs);
		dseg += wqe_attr.ds_cnt_inl;
	}

	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs,
					  attr.headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, xmit_more);

	return;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);
}
#endif