// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
#include <net/netdev_queues.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
				      void *data, size_t len);

static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
				    const skb_frag_t *frag,
				    size_t offset, size_t len);

static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
				     struct ionic_tx_desc_info *desc_info);

static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_tx_desc_info *desc_info,
			   struct ionic_txq_comp *comp);
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell)
{
	ionic_q_post(q, ring_dbell);
}

static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell)
{
	ionic_q_post(q, ring_dbell);
}
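
/* If a queue has been idle with work still posted, ring its doorbell
 * again once the deadline has passed.  Called from the napi handlers
 * when no completions were found, so a missed doorbell cannot stall
 * the queue indefinitely.
 */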
bool ionic_txq_poke_doorbell(struct ionic_queue *q)
{
	struct netdev_queue *netdev_txq;
	unsigned long now, then, dif;
	struct net_device *netdev;

	netdev = q->lif->netdev;
	netdev_txq = netdev_get_tx_queue(netdev, q->index);

	HARD_TX_LOCK(netdev, netdev_txq, smp_processor_id());

	if (q->tail_idx == q->head_idx) {
		HARD_TX_UNLOCK(netdev, netdev_txq);
		return false;
	}

	now = READ_ONCE(jiffies);
	then = q->dbell_jiffies;
	dif = now - then;

	if (dif > q->dbell_deadline) {
		ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
				 q->dbval | q->head_idx);

		q->dbell_jiffies = now;
	}

	HARD_TX_UNLOCK(netdev, netdev_txq);

	return true;
}
bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
{
	unsigned long now, then, dif;

	/* no lock, called from rx napi or txrx napi, nothing else can fill */

	if (q->tail_idx == q->head_idx)
		return false;

	now = READ_ONCE(jiffies);
	then = q->dbell_jiffies;
	dif = now - then;

	if (dif > q->dbell_deadline) {
		ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
				 q->dbval | q->head_idx);

		q->dbell_jiffies = now;

		dif = 2 * q->dbell_deadline;
		if (dif > IONIC_RX_MAX_DOORBELL_DEADLINE)
			dif = IONIC_RX_MAX_DOORBELL_DEADLINE;

		q->dbell_deadline = dif;
	}

	return true;
}
static inline struct ionic_txq_sg_elem *ionic_tx_sg_elems(struct ionic_queue *q)
{
	if (likely(q->sg_desc_size == sizeof(struct ionic_txq_sg_desc_v1)))
		return q->txq_sgl_v1[q->head_idx].elems;

	return q->txq_sgl[q->head_idx].elems;
}

static inline struct netdev_queue *q_to_ndq(struct net_device *netdev,
					    struct ionic_queue *q)
{
	return netdev_get_tx_queue(netdev, q->index);
}
static void *ionic_rx_buf_va(struct ionic_buf_info *buf_info)
{
	return page_address(buf_info->page) + buf_info->page_offset;
}

static dma_addr_t ionic_rx_buf_pa(struct ionic_buf_info *buf_info)
{
	return buf_info->dma_addr + buf_info->page_offset;
}

static unsigned int ionic_rx_buf_size(struct ionic_buf_info *buf_info)
{
	return min_t(u32, IONIC_MAX_BUF_LEN, IONIC_PAGE_SIZE - buf_info->page_offset);
}
static int ionic_rx_page_alloc(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct device *dev = q->dev;
	dma_addr_t dma_addr;
	struct page *page;

	page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
	if (unlikely(!page)) {
		net_err_ratelimited("%s: %s page alloc failed\n",
				    dev_name(dev), q->name);
		q_to_rx_stats(q)->alloc_err++;
		return -ENOMEM;
	}

	dma_addr = dma_map_page(dev, page, 0,
				IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma_addr))) {
		__free_pages(page, 0);
		net_err_ratelimited("%s: %s dma map failed\n",
				    dev_name(dev), q->name);
		q_to_rx_stats(q)->dma_map_err++;
		return -EIO;
	}

	buf_info->dma_addr = dma_addr;
	buf_info->page = page;
	buf_info->page_offset = 0;

	return 0;
}
static void ionic_rx_page_free(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct device *dev = q->dev;

	if (unlikely(!buf_info)) {
		net_err_ratelimited("%s: %s invalid buf_info in free\n",
				    dev_name(dev), q->name);
		return;
	}

	if (!buf_info->page)
		return;

	dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	__free_pages(buf_info->page, 0);
	buf_info->page = NULL;
}
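
/* Try to keep using the same rx page by advancing page_offset past the
 * data just received; give up and let the page be unmapped if it came
 * from a pfmemalloc emergency pool, sits on a remote NUMA node, or has
 * no room left for another buffer.
 */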
static bool ionic_rx_buf_recycle(struct ionic_queue *q,
				 struct ionic_buf_info *buf_info, u32 len)
{
	u32 size;

	/* don't re-use pages allocated in low-mem condition */
	if (page_is_pfmemalloc(buf_info->page))
		return false;

	/* don't re-use buffers from non-local numa nodes */
	if (page_to_nid(buf_info->page) != numa_mem_id())
		return false;

	size = ALIGN(len, q->xdp_rxq_info ? IONIC_PAGE_SIZE : IONIC_PAGE_SPLIT_SZ);
	buf_info->page_offset += size;
	if (buf_info->page_offset >= IONIC_PAGE_SIZE)
		return false;

	get_page(buf_info->page);

	return true;
}
static void ionic_rx_add_skb_frag(struct ionic_queue *q,
				  struct sk_buff *skb,
				  struct ionic_buf_info *buf_info,
				  u32 off, u32 len,
				  bool synced)
{
	if (!synced)
		dma_sync_single_range_for_cpu(q->dev, ionic_rx_buf_pa(buf_info),
					      off, len, DMA_FROM_DEVICE);

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
			buf_info->page, buf_info->page_offset + off,
			len, IONIC_PAGE_SIZE);

	if (!ionic_rx_buf_recycle(q, buf_info, len)) {
		dma_unmap_page(q->dev, buf_info->dma_addr,
			       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
		buf_info->page = NULL;
	}
}
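
/* Build a frag-based skb from the posted rx buffers: the first buffer
 * (skipping any XDP headroom) plus any sg buffers are attached as page
 * frags for napi_gro_frags() to consume.
 */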
static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
					  struct ionic_rx_desc_info *desc_info,
					  unsigned int headroom,
					  unsigned int len,
					  unsigned int num_sg_elems,
					  bool synced)
{
	struct ionic_buf_info *buf_info;
	struct sk_buff *skb;
	unsigned int i;
	u16 frag_len;

	buf_info = &desc_info->bufs[0];
	prefetchw(buf_info->page);

	skb = napi_get_frags(&q_to_qcq(q)->napi);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     dev_name(q->dev), q->name);
		q_to_rx_stats(q)->alloc_err++;
		return NULL;
	}

	if (headroom)
		frag_len = min_t(u16, len,
				 IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
	else
		frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));

	if (unlikely(!buf_info->page))
		goto err_bad_buf_page;
	ionic_rx_add_skb_frag(q, skb, buf_info, headroom, frag_len, synced);
	len -= frag_len;
	buf_info++;

	for (i = 0; i < num_sg_elems; i++, buf_info++) {
		if (unlikely(!buf_info->page))
			goto err_bad_buf_page;
		frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
		ionic_rx_add_skb_frag(q, skb, buf_info, 0, frag_len, synced);
		len -= frag_len;
	}

	return skb;

err_bad_buf_page:
	dev_kfree_skb(skb);
	return NULL;
}
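
/* For short packets it is cheaper to copy the data into a freshly
 * allocated linear skb than to hand over the page frag, which lets the
 * rx buffer keep its place in the ring.
 */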
static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev,
					  struct ionic_queue *q,
					  struct ionic_rx_desc_info *desc_info,
					  unsigned int headroom,
					  unsigned int len,
					  bool synced)
{
	struct ionic_buf_info *buf_info;
	struct device *dev = q->dev;
	struct sk_buff *skb;

	buf_info = &desc_info->bufs[0];

	skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     dev_name(dev), q->name);
		q_to_rx_stats(q)->alloc_err++;
		return NULL;
	}

	if (unlikely(!buf_info->page)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	if (!synced)
		dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
					      headroom, len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info) + headroom, len);
	dma_sync_single_range_for_device(dev, ionic_rx_buf_pa(buf_info),
					 headroom, len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, netdev);

	return skb;
}
static void ionic_xdp_tx_desc_clean(struct ionic_queue *q,
				    struct ionic_tx_desc_info *desc_info)
{
	unsigned int nbufs = desc_info->nbufs;
	struct ionic_buf_info *buf_info;
	struct device *dev = q->dev;
	int i;

	if (!nbufs)
		return;

	buf_info = desc_info->bufs;
	dma_unmap_single(dev, buf_info->dma_addr,
			 buf_info->len, DMA_TO_DEVICE);
	if (desc_info->act == XDP_TX)
		__free_pages(buf_info->page, 0);
	buf_info->page = NULL;

	buf_info++;
	for (i = 1; i < nbufs + 1 && buf_info->page; i++, buf_info++) {
		dma_unmap_page(dev, buf_info->dma_addr,
			       buf_info->len, DMA_TO_DEVICE);
		if (desc_info->act == XDP_TX)
			__free_pages(buf_info->page, 0);
		buf_info->page = NULL;
	}

	if (desc_info->act == XDP_REDIRECT)
		xdp_return_frame(desc_info->xdpf);

	desc_info->nbufs = 0;
	desc_info->xdpf = NULL;
	desc_info->act = 0;
}
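
/* Map an xdp_frame (and any frags) and post it on a tx queue as a
 * CSUM_NONE descriptor; used for both XDP_TX and ndo_xdp_xmit
 * (XDP_REDIRECT) traffic.
 */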
static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
				enum xdp_action act, struct page *page, int off,
				bool ring_doorbell)
{
	struct ionic_tx_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	struct ionic_tx_stats *stats;
	struct ionic_txq_desc *desc;
	size_t len = frame->len;
	dma_addr_t dma_addr;
	u64 cmd;

	desc_info = &q->tx_info[q->head_idx];
	desc = &q->txq[q->head_idx];
	buf_info = desc_info->bufs;
	stats = q_to_tx_stats(q);

	dma_addr = ionic_tx_map_single(q, frame->data, len);
	if (!dma_addr)
		return -EIO;
	buf_info->dma_addr = dma_addr;
	buf_info->len = len;
	buf_info->page = page;
	buf_info->page_offset = off;

	desc_info->nbufs = 1;
	desc_info->xdpf = frame;
	desc_info->act = act;

	if (xdp_frame_has_frags(frame)) {
		struct ionic_txq_sg_elem *elem;
		struct skb_shared_info *sinfo;
		struct ionic_buf_info *bi;
		skb_frag_t *frag;
		int i;

		bi = &buf_info[1];
		sinfo = xdp_get_shared_info_from_frame(frame);
		frag = sinfo->frags;
		elem = ionic_tx_sg_elems(q);
		for (i = 0; i < sinfo->nr_frags; i++, frag++, bi++) {
			dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
			if (!dma_addr) {
				ionic_tx_desc_unmap_bufs(q, desc_info);
				return -EIO;
			}
			bi->dma_addr = dma_addr;
			bi->len = skb_frag_size(frag);
			bi->page = skb_frag_page(frag);

			elem->addr = cpu_to_le64(bi->dma_addr);
			elem->len = cpu_to_le16(bi->len);
			elem++;

			desc_info->nbufs++;
		}
	}

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  0, (desc_info->nbufs - 1), buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->csum_start = 0;
	desc->csum_offset = 0;

	stats->xdp_frames++;
	stats->pkts++;
	stats->bytes += len;

	ionic_txq_post(q, ring_doorbell);

	return 0;
}
int ionic_xdp_xmit(struct net_device *netdev, int n,
		   struct xdp_frame **xdp_frames, u32 flags)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *txq;
	struct netdev_queue *nq;
	int nxmit;
	int space;
	int cpu;
	int qi;

	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	/* AdminQ is assumed on cpu 0, while we attempt to affinitize the
	 * TxRx queue pairs 0..n-1 on cpus 1..n.  We try to keep with that
	 * affinitization here, but of course irqbalance and friends might
	 * have juggled things anyway, so we have to check for the 0 case.
	 */
	cpu = smp_processor_id();
	qi = cpu ? (cpu - 1) % lif->nxqs : cpu;

	txq = &lif->txqcqs[qi]->q;
	nq = netdev_get_tx_queue(netdev, txq->index);
	__netif_tx_lock(nq, cpu);
	txq_trans_cond_update(nq);

	if (netif_tx_queue_stopped(nq) ||
	    !netif_txq_maybe_stop(q_to_ndq(netdev, txq),
				  ionic_q_space_avail(txq),
				  1, 1)) {
		__netif_tx_unlock(nq);
		return -EIO;
	}

	space = min_t(int, n, ionic_q_space_avail(txq));
	for (nxmit = 0; nxmit < space ; nxmit++) {
		if (ionic_xdp_post_frame(txq, xdp_frames[nxmit],
					 XDP_REDIRECT,
					 virt_to_page(xdp_frames[nxmit]->data),
					 0, false)) {
			nxmit--;
			break;
		}
	}

	if (flags & XDP_XMIT_FLUSH)
		ionic_dbell_ring(lif->kern_dbpage, txq->hw_type,
				 txq->dbval | txq->head_idx);

	netif_txq_maybe_stop(q_to_ndq(netdev, txq),
			     ionic_q_space_avail(txq),
			     4, 4);
	__netif_tx_unlock(nq);

	return nxmit;
}
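
/* Run the attached XDP program on a received buffer (building a
 * multi-frag xdp_buff first if the packet spilled into sg buffers) and
 * act on the verdict.  Returns false only for XDP_PASS, meaning the
 * caller still owns the packet and should build an skb for it.
 */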
static bool ionic_run_xdp(struct ionic_rx_stats *stats,
			  struct net_device *netdev,
			  struct bpf_prog *xdp_prog,
			  struct ionic_queue *rxq,
			  struct ionic_buf_info *buf_info,
			  int len)
{
	u32 xdp_action = XDP_ABORTED;
	struct xdp_buff xdp_buf;
	struct ionic_queue *txq;
	struct netdev_queue *nq;
	struct xdp_frame *xdpf;
	int remain_len;
	int frag_len;
	int err = 0;

	xdp_init_buff(&xdp_buf, IONIC_PAGE_SIZE, rxq->xdp_rxq_info);
	frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
	xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info),
			 XDP_PACKET_HEADROOM, frag_len, false);

	dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info),
				      XDP_PACKET_HEADROOM, len,
				      DMA_FROM_DEVICE);

	prefetchw(&xdp_buf.data_hard_start);

	/* We limit MTU size to one buffer if !xdp_has_frags, so
	 * if the recv len is bigger than one buffer
	 * then we know we have frag info to gather
	 */
	remain_len = len - frag_len;
	if (remain_len) {
		struct skb_shared_info *sinfo;
		struct ionic_buf_info *bi;
		skb_frag_t *frag;

		bi = buf_info;
		sinfo = xdp_get_shared_info_from_buff(&xdp_buf);
		sinfo->nr_frags = 0;
		sinfo->xdp_frags_size = 0;
		xdp_buff_set_frags_flag(&xdp_buf);

		do {
			if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) {
				err = -ENOSPC;
				goto out_xdp_abort;
			}

			frag = &sinfo->frags[sinfo->nr_frags];
			sinfo->nr_frags++;
			bi++;
			frag_len = min_t(u16, remain_len, ionic_rx_buf_size(bi));
			dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(bi),
						      0, frag_len, DMA_FROM_DEVICE);
			skb_frag_fill_page_desc(frag, bi->page, 0, frag_len);
			sinfo->xdp_frags_size += frag_len;
			remain_len -= frag_len;

			if (page_is_pfmemalloc(bi->page))
				xdp_buff_set_frag_pfmemalloc(&xdp_buf);
		} while (remain_len > 0);
	}

	xdp_action = bpf_prog_run_xdp(xdp_prog, &xdp_buf);

	switch (xdp_action) {
	case XDP_PASS:
		stats->xdp_pass++;
		return false;  /* false = we didn't consume the packet */

	case XDP_DROP:
		ionic_rx_page_free(rxq, buf_info);
		stats->xdp_drop++;
		break;

	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(&xdp_buf);
		if (!xdpf)
			goto out_xdp_abort;

		txq = rxq->partner;
		nq = netdev_get_tx_queue(netdev, txq->index);
		__netif_tx_lock(nq, smp_processor_id());
		txq_trans_cond_update(nq);

		if (netif_tx_queue_stopped(nq) ||
		    !netif_txq_maybe_stop(q_to_ndq(netdev, txq),
					  ionic_q_space_avail(txq),
					  1, 1)) {
			__netif_tx_unlock(nq);
			goto out_xdp_abort;
		}

		dma_unmap_page(rxq->dev, buf_info->dma_addr,
			       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);

		err = ionic_xdp_post_frame(txq, xdpf, XDP_TX,
					   buf_info->page,
					   buf_info->page_offset,
					   true);
		__netif_tx_unlock(nq);
		if (err) {
			netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
			goto out_xdp_abort;
		}
		buf_info->page = NULL;
		stats->xdp_tx++;

		/* the Tx completion will free the buffers */
		break;

	case XDP_REDIRECT:
		/* unmap the pages before handing them to a different device */
		dma_unmap_page(rxq->dev, buf_info->dma_addr,
			       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);

		err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog);
		if (err) {
			netdev_dbg(netdev, "xdp_do_redirect err %d\n", err);
			goto out_xdp_abort;
		}
		buf_info->page = NULL;
		rxq->xdp_flush = true;
		stats->xdp_redirect++;
		break;

	case XDP_ABORTED:
	default:
		goto out_xdp_abort;
	}

	return true;

out_xdp_abort:
	trace_xdp_exception(netdev, xdp_prog, xdp_action);
	ionic_rx_page_free(rxq, buf_info);
	stats->xdp_aborted++;

	return true;
}
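
/* Per-packet rx completion handling: run XDP if a program is attached,
 * otherwise build an skb (copybreak or frags), then fill in hash,
 * checksum, VLAN and hardware timestamp metadata from the completion
 * descriptor before handing the skb to GRO.
 */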
static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_rx_desc_info *desc_info,
			   struct ionic_rxq_comp *comp)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct ionic_rx_stats *stats;
	struct bpf_prog *xdp_prog;
	unsigned int headroom;
	struct sk_buff *skb;
	bool synced = false;
	bool use_copybreak;
	u16 len;

	stats = q_to_rx_stats(q);

	if (comp->status) {
		stats->dropped++;
		return;
	}

	len = le16_to_cpu(comp->len);
	stats->pkts++;
	stats->bytes += len;

	xdp_prog = READ_ONCE(q->lif->xdp_prog);
	if (xdp_prog) {
		if (ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len))
			return;
		synced = true;
	}

	headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
	use_copybreak = len <= q->lif->rx_copybreak;
	if (use_copybreak)
		skb = ionic_rx_copybreak(netdev, q, desc_info,
					 headroom, len, synced);
	else
		skb = ionic_rx_build_skb(q, desc_info, headroom, len,
					 comp->num_sg_elems, synced);

	if (unlikely(!skb)) {
		stats->dropped++;
		return;
	}

	skb_record_rx_queue(skb, q->index);

	if (likely(netdev->features & NETIF_F_RXHASH)) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	if (likely(netdev->features & NETIF_F_RXCSUM) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
		stats->csum_complete++;
	} else {
		stats->csum_none++;
	}

	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
		stats->csum_error++;

	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(comp->vlan_tci));
		stats->vlan_stripped++;
	}

	if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) {
		__le64 *cq_desc_hwstamp;
		u64 hwstamp;

		cq_desc_hwstamp =
			(void *)comp +
			sizeof(struct ionic_rxq_comp) -
			IONIC_HWSTAMP_CQ_NEGOFFSET;

		hwstamp = le64_to_cpu(*cq_desc_hwstamp);

		if (hwstamp != IONIC_HWSTAMP_INVALID) {
			skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
			stats->hwstamp_valid++;
		} else {
			stats->hwstamp_invalid++;
		}
	}

	if (use_copybreak)
		napi_gro_receive(&qcq->napi, skb);
	else
		napi_gro_frags(&qcq->napi);
}
bool ionic_rx_service(struct ionic_cq *cq)
{
	struct ionic_rx_desc_info *desc_info;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_rxq_comp *comp;

	comp = &((struct ionic_rxq_comp *)cq->base)[cq->tail_idx];

	if (!color_match(comp->pkt_type_color, cq->done_color))
		return false;

	/* check for empty queue */
	if (q->tail_idx == q->head_idx)
		return false;

	if (q->tail_idx != le16_to_cpu(comp->comp_index))
		return false;

	desc_info = &q->rx_info[q->tail_idx];
	q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

	/* clean the related q entry, only one per qc completion */
	ionic_rx_clean(q, desc_info, comp);

	return true;
}
static inline void ionic_write_cmb_desc(struct ionic_queue *q,
					void *desc)
{
	/* Since Rx and Tx descriptors are the same size, we can
	 * save an instruction or two and skip the qtype check.
	 */
	if (unlikely(q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS))
		memcpy_toio(&q->cmb_txq[q->head_idx], desc, sizeof(q->cmb_txq[0]));
}
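
/* Refill the rx ring up to the available space: (re)allocate pages as
 * needed, write the main and sg descriptors for each entry, then ring
 * the doorbell once and reset the doorbell deadline.
 */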
void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_rx_desc_info *desc_info;
	struct ionic_rxq_sg_elem *sg_elem;
	struct ionic_buf_info *buf_info;
	unsigned int fill_threshold;
	struct ionic_rxq_desc *desc;
	unsigned int remain_len;
	unsigned int frag_len;
	unsigned int nfrags;
	unsigned int n_fill;
	unsigned int len;
	unsigned int i;
	unsigned int j;

	n_fill = ionic_q_space_avail(q);

	fill_threshold = min_t(unsigned int, IONIC_RX_FILL_THRESHOLD,
			       q->num_descs / IONIC_RX_FILL_DIV);
	if (n_fill < fill_threshold)
		return;

	len = netdev->mtu + VLAN_ETH_HLEN;

	for (i = n_fill; i; i--) {
		unsigned int headroom;
		unsigned int buf_len;

		nfrags = 0;
		remain_len = len;
		desc = &q->rxq[q->head_idx];
		desc_info = &q->rx_info[q->head_idx];
		buf_info = &desc_info->bufs[0];

		if (!buf_info->page) { /* alloc a new buffer? */
			if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
				desc->addr = 0;
				desc->len = 0;
				return;
			}
		}

		/* fill main descriptor - buf[0]
		 * XDP uses space in the first buffer, so account for
		 * head room, tail room, and ip header in the first frag size.
		 */
		headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
		if (q->xdp_rxq_info)
			buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN;
		else
			buf_len = ionic_rx_buf_size(buf_info);
		frag_len = min_t(u16, len, buf_len);

		desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info) + headroom);
		desc->len = cpu_to_le16(frag_len);
		remain_len -= frag_len;
		buf_info++;
		nfrags++;

		/* fill sg descriptors - buf[1..n] */
		sg_elem = q->rxq_sgl[q->head_idx].elems;
		for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++, sg_elem++) {
			if (!buf_info->page) { /* alloc a new sg buffer? */
				if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
					sg_elem->addr = 0;
					sg_elem->len = 0;
					return;
				}
			}

			sg_elem->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info));
			frag_len = min_t(u16, remain_len, ionic_rx_buf_size(buf_info));
			sg_elem->len = cpu_to_le16(frag_len);
			remain_len -= frag_len;
			buf_info++;
			nfrags++;
		}

		/* clear end sg element as a sentinel */
		if (j < q->max_sg_elems)
			memset(sg_elem, 0, sizeof(*sg_elem));

		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
		desc_info->nbufs = nfrags;

		ionic_write_cmb_desc(q, desc);

		ionic_rxq_post(q, false);
	}

	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
			 q->dbval | q->head_idx);

	q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
	q->dbell_jiffies = jiffies;

	mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
		  jiffies + IONIC_NAPI_DEADLINE);
}
void ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_rx_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	unsigned int i, j;

	for (i = 0; i < q->num_descs; i++) {
		desc_info = &q->rx_info[i];
		for (j = 0; j < ARRAY_SIZE(desc_info->bufs); j++) {
			buf_info = &desc_info->bufs[j];
			if (buf_info->page)
				ionic_rx_page_free(q, buf_info);
		}
		desc_info->nbufs = 0;
	}

	q->head_idx = 0;
	q->tail_idx = 0;
}
static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
{
	struct dim_sample dim_sample;
	struct ionic_lif *lif;
	unsigned int qi;
	u64 pkts;
	u64 bytes;

	if (!qcq->intr.dim_coal_hw)
		return;

	lif = qcq->q.lif;
	qi = qcq->cq.bound_q->index;

	switch (napi_mode) {
	case IONIC_LIF_F_TX_DIM_INTR:
		pkts = lif->txqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes;
		break;
	case IONIC_LIF_F_RX_DIM_INTR:
		pkts = lif->rxqstats[qi].pkts;
		bytes = lif->rxqstats[qi].bytes;
		break;
	default:
		pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
		break;
	}

	dim_update_sample(qcq->cq.bound_intr->rearm_count,
			  pkts, bytes, &dim_sample);

	net_dim(&qcq->dim, dim_sample);
}
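
/* Napi poll handlers: tx-only, rx-only, and combined txrx.  Each
 * services completions, refills rx where relevant, updates dynamic
 * interrupt moderation, and re-arms the interrupt; if no work was
 * found, the doorbell-poke helpers guard against a stalled queue.
 */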
int ionic_tx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	u32 work_done = 0;
	u32 flags = 0;

	work_done = ionic_tx_cq_service(cq, budget);

	if (unlikely(!budget))
		return budget;

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(cq->idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	if (!work_done && ionic_txq_poke_doorbell(&qcq->q))
		mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);

	return work_done;
}
static void ionic_xdp_do_flush(struct ionic_cq *cq)
{
	if (cq->bound_q->xdp_flush) {
		xdp_do_flush();
		cq->bound_q->xdp_flush = false;
	}
}
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	u32 work_done = 0;
	u32 flags = 0;

	if (unlikely(!budget))
		return budget;

	work_done = ionic_cq_service(cq, budget,
				     ionic_rx_service, NULL, NULL);

	ionic_rx_fill(cq->bound_q);

	ionic_xdp_do_flush(cq);
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(cq->idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	if (!work_done && ionic_rxq_poke_doorbell(&qcq->q))
		mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);

	return work_done;
}
int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *rxqcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_qcq *txqcq;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	bool resched = false;
	u32 rx_work_done = 0;
	u32 tx_work_done = 0;
	u32 flags = 0;

	lif = rxcq->bound_q->lif;
	txqcq = lif->txqcqs[qi];
	txcq = &lif->txqcqs[qi]->cq;

	tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT);

	if (unlikely(!budget))
		return budget;

	rx_work_done = ionic_cq_service(rxcq, budget,
					ionic_rx_service, NULL, NULL);

	ionic_rx_fill(rxcq->bound_q);

	ionic_xdp_do_flush(rxcq);
	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
		ionic_dim_update(rxqcq, 0);
		flags |= IONIC_INTR_CRED_UNMASK;
		rxcq->bound_intr->rearm_count++;
	}

	if (rx_work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(rxcq->idev->intr_ctrl, rxcq->bound_intr->index,
				   tx_work_done + rx_work_done, flags);
	}

	if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q))
		resched = true;
	if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q))
		resched = true;
	if (resched)
		mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);

	return rx_work_done;
}
static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
				      void *data, size_t len)
{
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     dev_name(dev), q->name);
		q_to_tx_stats(q)->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
				    const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     dev_name(dev), q->name);
		q_to_tx_stats(q)->dma_map_err++;
		return 0;
	}
	return dma_addr;
}
static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
			    struct ionic_tx_desc_info *desc_info)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct device *dev = q->dev;
	dma_addr_t dma_addr;
	unsigned int nfrags;
	skb_frag_t *frag;
	int frag_idx;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (!dma_addr)
		return -EIO;
	buf_info->dma_addr = dma_addr;
	buf_info->len = skb_headlen(skb);
	buf_info++;

	frag = skb_shinfo(skb)->frags;
	nfrags = skb_shinfo(skb)->nr_frags;
	for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
		dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
		if (!dma_addr)
			goto dma_fail;
		buf_info->dma_addr = dma_addr;
		buf_info->len = skb_frag_size(frag);
		buf_info++;
	}

	desc_info->nbufs = 1 + nfrags;

	return 0;

dma_fail:
	/* unwind the frag mappings and the head mapping */
	while (frag_idx > 0) {
		frag_idx--;
		buf_info--;
		dma_unmap_page(dev, buf_info->dma_addr,
			       buf_info->len, DMA_TO_DEVICE);
	}
	dma_unmap_single(dev, desc_info->bufs[0].dma_addr,
			 desc_info->bufs[0].len, DMA_TO_DEVICE);
	return -EIO;
}
static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
				     struct ionic_tx_desc_info *desc_info)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct device *dev = q->dev;
	unsigned int i;

	if (!desc_info->nbufs)
		return;

	dma_unmap_single(dev, buf_info->dma_addr,
			 buf_info->len, DMA_TO_DEVICE);
	buf_info++;
	for (i = 1; i < desc_info->nbufs; i++, buf_info++)
		dma_unmap_page(dev, buf_info->dma_addr,
			       buf_info->len, DMA_TO_DEVICE);

	desc_info->nbufs = 0;
}
static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_tx_desc_info *desc_info,
			   struct ionic_txq_comp *comp)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct sk_buff *skb;

	if (desc_info->xdpf) {
		ionic_xdp_tx_desc_clean(q->partner, desc_info);
		stats->clean++;

		if (unlikely(__netif_subqueue_stopped(q->lif->netdev, q->index)))
			netif_wake_subqueue(q->lif->netdev, q->index);

		return;
	}

	ionic_tx_desc_unmap_bufs(q, desc_info);

	skb = desc_info->skb;
	if (!skb)
		return;

	if (unlikely(ionic_txq_hwstamp_enabled(q))) {
		if (comp) {
			struct skb_shared_hwtstamps hwts = {};
			__le64 *cq_desc_hwstamp;
			u64 hwstamp;

			cq_desc_hwstamp =
				(void *)comp +
				sizeof(struct ionic_txq_comp) -
				IONIC_HWSTAMP_CQ_NEGOFFSET;

			hwstamp = le64_to_cpu(*cq_desc_hwstamp);

			if (hwstamp != IONIC_HWSTAMP_INVALID) {
				hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);

				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
				skb_tstamp_tx(skb, &hwts);

				stats->hwstamp_valid++;
			} else {
				stats->hwstamp_invalid++;
			}
		}
	}

	desc_info->bytes = skb->len;
	stats->clean++;

	napi_consume_skb(skb, 1);
}
static bool ionic_tx_service(struct ionic_cq *cq,
			     unsigned int *total_pkts, unsigned int *total_bytes)
{
	struct ionic_tx_desc_info *desc_info;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_txq_comp *comp;
	unsigned int bytes = 0;
	unsigned int pkts = 0;
	u16 index;

	comp = &((struct ionic_txq_comp *)cq->base)[cq->tail_idx];

	if (!color_match(comp->color, cq->done_color))
		return false;

	/* clean the related q entries, there could be
	 * several q entries completed for each cq completion
	 */
	do {
		desc_info = &q->tx_info[q->tail_idx];
		desc_info->bytes = 0;
		index = q->tail_idx;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, comp);
		if (desc_info->skb) {
			pkts++;
			bytes += desc_info->bytes;
			desc_info->skb = NULL;
		}
	} while (index != le16_to_cpu(comp->comp_index));

	(*total_pkts) += pkts;
	(*total_bytes) += bytes;

	return true;
}
unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
{
	unsigned int work_done = 0;
	unsigned int bytes = 0;
	unsigned int pkts = 0;

	if (work_to_do == 0)
		return 0;

	while (ionic_tx_service(cq, &pkts, &bytes)) {
		if (cq->tail_idx == cq->num_descs - 1)
			cq->done_color = !cq->done_color;
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);

		if (++work_done >= work_to_do)
			break;
	}

	if (work_done) {
		struct ionic_queue *q = cq->bound_q;

		if (likely(!ionic_txq_hwstamp_enabled(q)))
			netif_txq_completed_wake(q_to_ndq(q->lif->netdev, q),
						 pkts, bytes,
						 ionic_q_space_avail(q),
						 IONIC_TSO_DESCS_NEEDED);
	}

	return work_done;
}
void ionic_tx_flush(struct ionic_cq *cq)
{
	u32 work_done;

	work_done = ionic_tx_cq_service(cq, cq->num_descs);
	if (work_done)
		ionic_intr_credits(cq->idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}
void ionic_tx_empty(struct ionic_queue *q)
{
	struct ionic_tx_desc_info *desc_info;
	int bytes = 0;
	int pkts = 0;

	/* walk the not completed tx entries, if any */
	while (q->head_idx != q->tail_idx) {
		desc_info = &q->tx_info[q->tail_idx];
		desc_info->bytes = 0;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, NULL);
		if (desc_info->skb) {
			pkts++;
			bytes += desc_info->bytes;
			desc_info->skb = NULL;
		}
	}

	if (likely(!ionic_txq_hwstamp_enabled(q))) {
		struct netdev_queue *ndq = q_to_ndq(q->lif->netdev, q);

		netdev_tx_completed_queue(ndq, pkts, bytes);
		netdev_tx_reset_queue(ndq);
	}
}
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}

	return 0;
}
static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q,
			      struct ionic_tx_desc_info *desc_info,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	struct ionic_txq_desc *desc = &q->txq[q->head_idx];
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	ionic_write_cmb_desc(q, desc);

	if (start) {
		skb_tx_timestamp(skb);
		if (likely(!ionic_txq_hwstamp_enabled(q)))
			netdev_tx_sent_queue(q_to_ndq(netdev, q), skb->len);
		ionic_txq_post(q, false);
	} else {
		ionic_txq_post(q, done);
	}
}
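
/* Map the skb and carve it into TSO descriptors: each descriptor
 * covers at most hdrlen+mss (first) or mss bytes, pulling data from
 * the mapped frags and spilling extra chunks into sg elements.
 */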
static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
			struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_tx_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int chunk_len;
	unsigned int frag_rem;
	unsigned int tso_rem;
	unsigned int seg_rem;
	dma_addr_t desc_addr;
	dma_addr_t frag_addr;
	unsigned int hdrlen;
	unsigned int len;
	unsigned int mss;
	bool start, done;
	bool outer_csum;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	desc_info = &q->tx_info[q->head_idx];

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	len = skb->len;
	mss = skb_shinfo(skb)->gso_size;
	outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
						   SKB_GSO_GRE_CSUM |
						   SKB_GSO_IPXIP4 |
						   SKB_GSO_IPXIP6 |
						   SKB_GSO_UDP_TUNNEL |
						   SKB_GSO_UDP_TUNNEL_CSUM));
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero.  HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err) {
		/* clean up mapping from ionic_tx_map_skb */
		ionic_tx_desc_unmap_bufs(q, desc_info);
		return err;
	}

	if (encap)
		hdrlen = skb_inner_tcp_all_headers(skb);
	else
		hdrlen = skb_tcp_all_headers(skb);

	desc_info->skb = skb;
	buf_info = desc_info->bufs;
	tso_rem = len;
	seg_rem = min(tso_rem, hdrlen + mss);

	frag_addr = 0;
	frag_rem = 0;

	start = true;

	while (tso_rem > 0) {
		desc = NULL;
		elem = NULL;
		desc_addr = 0;
		desc_len = 0;
		desc_nsge = 0;
		/* use fragments until we have enough to post a single descriptor */
		while (seg_rem > 0) {
			/* if the fragment is exhausted then move to the next one */
			if (frag_rem == 0) {
				/* grab the next fragment */
				frag_addr = buf_info->dma_addr;
				frag_rem = buf_info->len;
				buf_info++;
			}
			chunk_len = min(frag_rem, seg_rem);
			if (!desc) {
				/* fill main descriptor */
				desc = &q->txq[q->head_idx];
				elem = ionic_tx_sg_elems(q);
				desc_addr = frag_addr;
				desc_len = chunk_len;
			} else {
				/* fill sg descriptor */
				elem->addr = cpu_to_le64(frag_addr);
				elem->len = cpu_to_le16(chunk_len);
				elem++;
				desc_nsge++;
			}
			frag_addr += chunk_len;
			frag_rem -= chunk_len;
			tso_rem -= chunk_len;
			seg_rem -= chunk_len;
		}
		seg_rem = min(tso_rem, mss);
		done = (tso_rem == 0);
		/* post descriptor */
		ionic_tx_tso_post(netdev, q, desc_info, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss, outer_csum, vlan_tci, has_vlan,
				  start, done);
		start = false;
		/* Buffer information is stored with the first tso descriptor */
		desc_info = &q->tx_info[q->head_idx];
		desc_info->nbufs = 0;
	}

	stats->pkts += DIV_ROUND_UP(len - hdrlen, mss);
	stats->bytes += len;
	stats->tso++;
	stats->tso_bytes = len;

	return 0;
}
static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
			       struct ionic_tx_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = &q->txq[q->head_idx];
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);

	ionic_write_cmb_desc(q, desc);

	if (skb_csum_is_sctp(skb))
		stats->crc32_csum++;
	else
		stats->csum++;
}
static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
				  struct ionic_tx_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = &q->txq[q->head_idx];
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = 0;
	desc->csum_offset = 0;

	ionic_write_cmb_desc(q, desc);

	stats->csum_none++;
}
static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
			       struct ionic_tx_desc_info *desc_info)
{
	struct ionic_buf_info *buf_info = &desc_info->bufs[1];
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_sg_elem *elem;
	unsigned int i;

	elem = ionic_tx_sg_elems(q);
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {
		elem->addr = cpu_to_le64(buf_info->dma_addr);
		elem->len = cpu_to_le16(buf_info->len);
	}

	stats->frags += skb_shinfo(skb)->nr_frags;
}
static int ionic_tx(struct net_device *netdev, struct ionic_queue *q,
		    struct sk_buff *skb)
{
	struct ionic_tx_desc_info *desc_info = &q->tx_info[q->head_idx];
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool ring_dbell = true;

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	desc_info->skb = skb;

	/* set up the initial descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		ionic_tx_calc_csum(q, skb, desc_info);
	else
		ionic_tx_calc_no_csum(q, skb, desc_info);

	/* add frags */
	ionic_tx_skb_frags(q, skb, desc_info);

	skb_tx_timestamp(skb);
	stats->pkts++;
	stats->bytes += skb->len;

	if (likely(!ionic_txq_hwstamp_enabled(q))) {
		struct netdev_queue *ndq = q_to_ndq(netdev, q);

		if (unlikely(!ionic_q_has_space(q, MAX_SKB_FRAGS + 1)))
			netif_tx_stop_queue(ndq);
		ring_dbell = __netdev_tx_sent_queue(ndq, skb->len,
						    netdev_xmit_more());
	}
	ionic_txq_post(q, ring_dbell);

	return 0;
}
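
/* Estimate how many descriptors an skb will need, and linearize it if
 * it (or any single TSO segment of it) would use more sg entries per
 * descriptor than the queue supports.
 */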
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;
	bool too_many_frags = false;
	skb_frag_t *frag;
	int desc_bufs;
	int chunk_len;
	int frag_rem;
	int tso_rem;
	int seg_rem;
	bool encap;
	int hdrlen;
	int ndescs;
	int err;

	/* Each desc is mss long max, so a descriptor for each gso_seg */
	if (skb_is_gso(skb)) {
		ndescs = skb_shinfo(skb)->gso_segs;
		if (!nr_frags)
			return ndescs;
	} else {
		ndescs = 1;
		if (!nr_frags)
			return ndescs;

		if (unlikely(nr_frags > q->max_sg_elems)) {
			too_many_frags = true;
			goto linearize;
		}

		return ndescs;
	}

	/* We need to scan the skb to be sure that none of the MTU sized
	 * packets in the TSO will require more sgs per descriptor than we
	 * can support.  We loop through the frags, add up the lengths for
	 * a packet, and count the number of sgs used per packet.
	 */
	tso_rem = skb->len;
	frag = skb_shinfo(skb)->frags;
	encap = skb->encapsulation;

	/* start with just hdr in first part of first descriptor */
	if (encap)
		hdrlen = skb_inner_tcp_all_headers(skb);
	else
		hdrlen = skb_tcp_all_headers(skb);
	seg_rem = min_t(int, tso_rem, hdrlen + skb_shinfo(skb)->gso_size);
	frag_rem = hdrlen;

	while (tso_rem > 0) {
		desc_bufs = 0;
		while (seg_rem > 0) {
			desc_bufs++;

			/* We add the +1 because we can take buffers for one
			 * more than we have SGs: one for the initial desc data
			 * in addition to the SG segments that might follow.
			 */
			if (desc_bufs > q->max_sg_elems + 1) {
				too_many_frags = true;
				goto linearize;
			}

			if (frag_rem == 0) {
				frag_rem = skb_frag_size(frag);
				frag++;
			}
			chunk_len = min(frag_rem, seg_rem);
			frag_rem -= chunk_len;
			tso_rem -= chunk_len;
			seg_rem -= chunk_len;
		}

		seg_rem = min_t(int, tso_rem, skb_shinfo(skb)->gso_size);
	}

linearize:
	if (too_many_frags) {
		err = skb_linearize(skb);
		if (err)
			return err;
		q_to_tx_stats(q)->linearize++;
	}

	return ndescs;
}
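
/* Transmit path for packets that want a hardware timestamp: they are
 * posted to the dedicated hwstamp tx queue, and simply dropped if that
 * queue has no room.
 */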
static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int err, ndescs;

	/* Does not stop/start txq, because we post to a separate tx queue
	 * for timestamping, and if a packet can't be posted immediately to
	 * the timestamping queue, it is dropped.
	 */

	q = &lif->hwstamp_txq->q;
	ndescs = ionic_tx_descs_needed(q, skb);
	if (unlikely(ndescs < 0))
		goto err_out_drop;

	if (unlikely(!ionic_q_has_space(q, ndescs)))
		goto err_out_drop;

	skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
	if (skb_is_gso(skb))
		err = ionic_tx_tso(netdev, q, skb);
	else
		err = ionic_tx(netdev, q, skb);

	if (err)
		goto err_out_drop;

	return NETDEV_TX_OK;

err_out_drop:
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		if (lif->hwstamp_txq && lif->phc->ts_config_tx_mode)
			return ionic_start_hwstamp_xmit(skb, netdev);

	if (unlikely(queue_index >= lif->nxqs))
		queue_index = 0;
	q = &lif->txqcqs[queue_index]->q;

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (!netif_txq_maybe_stop(q_to_ndq(netdev, q),
				  ionic_q_space_avail(q),
				  ndescs, ndescs))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(netdev, q, skb);
	else
		err = ionic_tx(netdev, q, skb);

	if (err)
		goto err_out_drop;

	return NETDEV_TX_OK;

err_out_drop:
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}