// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"

static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg);

static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);

static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);

static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	DEBUG_STATS_TXQ_POST(q, ring_dbell);

	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);

	DEBUG_STATS_RX_BUFF_CNT(q);
}

static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
	return netdev_get_tx_queue(q->lif->netdev, q->index);
}

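/* The rx path builds packets either by attaching full-page frags to a
 * napi skb or, for small packets, by copying into a freshly allocated
 * linear skb (see ionic_rx_copybreak() below).
 */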
static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
					  unsigned int len, bool frags)
{
	struct ionic_lif *lif = q->lif;
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct sk_buff *skb;

	netdev = lif->netdev;
	stats = &q->lif->rxqstats[q->index];

	if (frags)
		skb = napi_get_frags(&q_to_qcq(q)->napi);
	else
		skb = netdev_alloc_skb_ip_align(netdev, len);

	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	return skb;
}

static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
				      struct ionic_desc_info *desc_info,
				      struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_page_info *page_info;
	struct sk_buff *skb;
	unsigned int i;
	u16 frag_len;
	u16 len;

	page_info = &desc_info->pages[0];
	len = le16_to_cpu(comp->len);

	prefetch(page_address(page_info->page) + NET_IP_ALIGN);

	skb = ionic_rx_skb_alloc(q, len, true);
	if (unlikely(!skb))
		return NULL;

	i = comp->num_sg_elems + 1;
	do {
		if (unlikely(!page_info->page)) {
			struct napi_struct *napi = &q_to_qcq(q)->napi;

			napi->skb = NULL;
			dev_kfree_skb(skb);
			return NULL;
		}

		frag_len = min(len, (u16)PAGE_SIZE);
		len -= frag_len;

		dma_unmap_page(dev, dma_unmap_addr(page_info, dma_addr),
			       PAGE_SIZE, DMA_FROM_DEVICE);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page_info->page, 0, frag_len, PAGE_SIZE);
		page_info->page = NULL;
		page_info++;
		i--;
	} while (i > 0);

	return skb;
}

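/* Copybreak path: for packets no larger than lif->rx_copybreak, copy
 * the data into the skb's linear area so the rx page stays mapped and
 * can be recycled by the next fill.
 */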
static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
					  struct ionic_desc_info *desc_info,
					  struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_page_info *page_info;
	struct sk_buff *skb;
	u16 len;

	page_info = &desc_info->pages[0];
	len = le16_to_cpu(comp->len);

	skb = ionic_rx_skb_alloc(q, len, false);
	if (unlikely(!skb))
		return NULL;

	if (unlikely(!page_info->page)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, dma_addr),
				len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, page_address(page_info->page), len);
	dma_sync_single_for_device(dev, dma_unmap_addr(page_info, dma_addr),
				   len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, q->lif->netdev);

	return skb;
}

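/* Per-completion rx handler: build the skb and apply the RSS hash,
 * checksum, and VLAN results the device reported before handing the
 * packet to GRO.
 */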
static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct sk_buff *skb;

	stats = q_to_rx_stats(q);
	netdev = q->lif->netdev;

	if (comp->status) {
		stats->dropped++;
		return;
	}

	stats->pkts++;
	stats->bytes += le16_to_cpu(comp->len);

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		skb = ionic_rx_copybreak(q, desc_info, cq_info);
	else
		skb = ionic_rx_frags(q, desc_info, cq_info);

	if (unlikely(!skb)) {
		stats->dropped++;
		return;
	}

	skb_record_rx_queue(skb, q->index);

	if (likely(netdev->features & NETIF_F_RXHASH)) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	if (likely(netdev->features & NETIF_F_RXCSUM)) {
		if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__wsum)le16_to_cpu(comp->csum);
			stats->csum_complete++;
		}
	} else {
		stats->csum_none++;
	}

	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
		stats->csum_error++;

	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(comp->vlan_tci));
		stats->vlan_stripped++;
	}

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		napi_gro_receive(&qcq->napi, skb);
	else
		napi_gro_frags(&qcq->napi);
}

static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;

	if (!color_match(comp->pkt_type_color, cq->done_color))
		return false;

	/* check for empty queue */
	if (q->tail_idx == q->head_idx)
		return false;

	if (q->tail_idx != le16_to_cpu(comp->comp_index))
		return false;

	desc_info = &q->info[q->tail_idx];
	q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

	/* clean the related q entry, only one per qc completion */
	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);

	desc_info->cb = NULL;
	desc_info->cb_arg = NULL;

	return true;
}

void ionic_rx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_cq_service(cq, cq->num_descs,
				     ionic_rx_service, NULL, NULL);

	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

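/* Rx buffers are whole pages, DMA-mapped for device writes and either
 * recycled in place or unmapped and freed when the queue is emptied.
 */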
static int ionic_rx_page_alloc(struct ionic_queue *q,
			       struct ionic_page_info *page_info)
{
	struct ionic_lif *lif = q->lif;
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct device *dev;

	netdev = lif->netdev;
	dev = lif->ionic->dev;
	stats = q_to_rx_stats(q);

	if (unlikely(!page_info)) {
		net_err_ratelimited("%s: %s invalid page_info in alloc\n",
				    netdev->name, q->name);
		return -EINVAL;
	}

	page_info->page = dev_alloc_page();
	if (unlikely(!page_info->page)) {
		net_err_ratelimited("%s: %s page alloc failed\n",
				    netdev->name, q->name);
		stats->alloc_err++;
		return -ENOMEM;
	}

	page_info->dma_addr = dma_map_page(dev, page_info->page, 0, PAGE_SIZE,
					   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, page_info->dma_addr))) {
		put_page(page_info->page);
		page_info->dma_addr = 0;
		page_info->page = NULL;
		net_err_ratelimited("%s: %s dma map failed\n",
				    netdev->name, q->name);
		stats->dma_map_err++;
		return -EIO;
	}

	return 0;
}

static void ionic_rx_page_free(struct ionic_queue *q,
			       struct ionic_page_info *page_info)
{
	struct ionic_lif *lif = q->lif;
	struct net_device *netdev;
	struct device *dev;

	netdev = lif->netdev;
	dev = lif->ionic->dev;

	if (unlikely(!page_info)) {
		net_err_ratelimited("%s: %s invalid page_info in free\n",
				    netdev->name, q->name);
		return;
	}

	if (unlikely(!page_info->page)) {
		net_err_ratelimited("%s: %s invalid page in free\n",
				    netdev->name, q->name);
		return;
	}

	dma_unmap_page(dev, page_info->dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

	put_page(page_info->page);
	page_info->dma_addr = 0;
	page_info->page = NULL;
}

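/* Refill the rx ring.  Each buffer covers mtu + ETH_HLEN bytes; when
 * that spans more than one page, the remainder goes into SG elements.
 * The doorbell is rung once, after all available slots are posted.
 */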
void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_desc_info *desc_info;
	struct ionic_page_info *page_info;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *sg_elem;
	struct ionic_rxq_desc *desc;
	unsigned int remain_len;
	unsigned int seg_len;
	unsigned int nfrags;
	unsigned int i, j;
	unsigned int len;

	len = netdev->mtu + ETH_HLEN;
	nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;

	for (i = ionic_q_space_avail(q); i; i--) {
		remain_len = len;
		desc_info = &q->info[q->head_idx];
		desc = desc_info->desc;
		sg_desc = desc_info->sg_desc;
		page_info = &desc_info->pages[0];

		if (page_info->page) { /* recycle the buffer */
			ionic_rxq_post(q, false, ionic_rx_clean, NULL);
			continue;
		}

		/* fill main descriptor - pages[0] */
		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
		desc_info->npages = nfrags;
		if (unlikely(ionic_rx_page_alloc(q, page_info))) {
			desc->addr = 0;
			desc->len = 0;
			return;
		}
		desc->addr = cpu_to_le64(page_info->dma_addr);
		seg_len = min_t(unsigned int, PAGE_SIZE, len);
		desc->len = cpu_to_le16(seg_len);
		remain_len -= seg_len;
		page_info++;

		/* fill sg descriptors - pages[1..n] */
		for (j = 0; j < nfrags - 1; j++) {
			if (page_info->page) /* recycle the sg buffer */
				continue;

			sg_elem = &sg_desc->elems[j];
			if (unlikely(ionic_rx_page_alloc(q, page_info))) {
				sg_elem->addr = 0;
				sg_elem->len = 0;
				return;
			}
			sg_elem->addr = cpu_to_le64(page_info->dma_addr);
			seg_len = min_t(unsigned int, PAGE_SIZE, remain_len);
			sg_elem->len = cpu_to_le16(seg_len);
			remain_len -= seg_len;
			page_info++;
		}

		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
	}

	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
			 q->dbval | q->head_idx);
}

static void ionic_rx_fill_cb(void *arg)
{
	ionic_rx_fill(arg);
}

void ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_desc *desc;
	unsigned int i;
	u16 idx;

	idx = q->tail_idx;
	while (idx != q->head_idx) {
		desc_info = &q->info[idx];
		desc = desc_info->desc;
		desc->addr = 0;
		desc->len = 0;

		for (i = 0; i < desc_info->npages; i++)
			ionic_rx_page_free(q, &desc_info->pages[i]);

		desc_info->cb_arg = NULL;
		idx = (idx + 1) & (q->num_descs - 1);
	}
}

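/* Dynamic interrupt moderation: sample traffic on each napi rearm and
 * let net_dim() pick the coalesce value, which is then written to the
 * hardware by hand since this register isn't managed by net_dim().
 */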
static void ionic_dim_update(struct ionic_qcq *qcq)
{
	struct dim_sample dim_sample;
	struct ionic_lif *lif;
	unsigned int qi;

	if (!qcq->intr.dim_coal_hw)
		return;

	/* because we're not using the intr coalesce register
	 * for this, we need to manually update it with the
	 * value that net_dim() has picked
	 */
	lif = qcq->q.lif;
	qi = qcq->cq.bound_q->index;

	ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
			     lif->rxqcqs[qi]->intr.index,
			     qcq->intr.dim_coal_hw);

	dim_update_sample(qcq->cq.bound_intr->rearm_count,
			  lif->txqstats[qi].pkts,
			  lif->txqstats[qi].bytes,
			  &dim_sample);

	net_dim(&qcq->dim, dim_sample);
}

int ionic_tx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_tx_service, NULL, NULL);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, work_done);

	return work_done;
}

int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_rx_service, NULL, NULL);

	if (work_done)
		ionic_rx_fill(cq->bound_q);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, work_done);

	return work_done;
}

int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	u32 rx_work_done = 0;
	u32 tx_work_done = 0;
	u32 flags = 0;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txcq = &lif->txqcqs[qi]->cq;

	tx_work_done = ionic_cq_service(txcq, lif->tx_budget,
					ionic_tx_service, NULL, NULL);

	rx_work_done = ionic_cq_service(rxcq, budget,
					ionic_rx_service, NULL, NULL);

	if (rx_work_done)
		ionic_rx_fill_cb(rxcq->bound_q);

	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
		ionic_dim_update(qcq);
		flags |= IONIC_INTR_CRED_UNMASK;
		rxcq->bound_intr->rearm_count++;
	}

	if (rx_work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   tx_work_done + rx_work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
	DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);

	return rx_work_done;
}

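/* Tx buffer mapping helpers; a mapping failure is counted and reported
 * back to the caller so the descriptor ring can be unwound.
 */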
static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
				      void *data, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
				    const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc;
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_desc *desc = desc_info->desc;
	struct device *dev = q->lif->ionic->dev;
	u8 opcode, flags, nsge;
	u16 queue_index;
	unsigned int i;
	u64 addr;

	decode_txq_desc_cmd(le64_to_cpu(desc->cmd),
			    &opcode, &flags, &nsge, &addr);

	/* use unmap_single only if either this is not TSO,
	 * or this is first descriptor of a TSO
	 */
	if (opcode != IONIC_TXQ_DESC_OPCODE_TSO ||
	    flags & IONIC_TXQ_DESC_FLAG_TSO_SOT)
		dma_unmap_single(dev, (dma_addr_t)addr,
				 le16_to_cpu(desc->len), DMA_TO_DEVICE);
	else
		dma_unmap_page(dev, (dma_addr_t)addr,
			       le16_to_cpu(desc->len), DMA_TO_DEVICE);

	for (i = 0; i < nsge; i++, elem++)
		dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr),
			       le16_to_cpu(elem->len), DMA_TO_DEVICE);

	if (cb_arg) {
		struct sk_buff *skb = cb_arg;
		u32 len = skb->len;

		queue_index = skb_get_queue_mapping(skb);
		if (unlikely(__netif_subqueue_stopped(q->lif->netdev,
						      queue_index))) {
			netif_wake_subqueue(q->lif->netdev, queue_index);
			q->wake++;
		}
		dev_kfree_skb_any(skb);
		stats->clean++;
		netdev_tx_completed_queue(q_to_ndq(q), 1, len);
	}
}

static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_txq_comp *comp = cq_info->cq_desc;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	u16 index;

	if (!color_match(comp->color, cq->done_color))
		return false;

	/* clean the related q entries, there could be
	 * several q entries completed for each cq completion
	 */
	do {
		desc_info = &q->info[q->tail_idx];
		index = q->tail_idx;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	} while (index != le16_to_cpu(comp->comp_index));

	return true;
}

void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_cq_service(cq, cq->num_descs,
				     ionic_tx_service, NULL, NULL);

	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

void ionic_tx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;

	/* walk the not completed tx entries, if any */
	while (q->head_idx != q->tail_idx) {
		desc_info = &q->info[q->tail_idx];
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}
}

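/* Helpers to preload the (inner) TCP checksum with the pseudo-header
 * sum before TSO, for both IPv4 and IPv6, with and without
 * encapsulation.
 */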
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}

	return 0;
}

static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	if (done) {
		skb_tx_timestamp(skb);
		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
		ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, false, ionic_tx_clean, NULL);
	}
}

static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
						struct ionic_txq_sg_elem **elem)
{
	struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
	struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;

	*elem = sg_desc->elems;
	return desc;
}

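/* Carve the skb into mss-sized segments across one or more TSO
 * descriptors, walking from the linear data into the frags; the SOT
 * and EOT flags mark the first and last descriptors of the TSO.
 */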
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *rewind_desc_info;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int frag_left = 0;
	unsigned int offset = 0;
	u16 abort = q->head_idx;
	unsigned int len_left;
	dma_addr_t desc_addr;
	unsigned int hdrlen;
	unsigned int nfrags;
	unsigned int seglen;
	u64 total_bytes = 0;
	u64 total_pkts = 0;
	u16 rewind = abort;
	unsigned int left;
	unsigned int len;
	unsigned int mss;
	skb_frag_t *frag;
	bool start, done;
	bool outer_csum;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	mss = skb_shinfo(skb)->gso_size;
	nfrags = skb_shinfo(skb)->nr_frags;
	len_left = skb->len - skb_headlen(skb);
	outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
		     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero.  HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */
	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err)
		return err;

	if (encap)
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			 inner_tcp_hdrlen(skb);
	else
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);

	seglen = hdrlen + mss;
	left = skb_headlen(skb);

	desc = ionic_tx_tso_next(q, &elem);
	start = true;

	/* Chop skb->data up into desc segments */

	while (left > 0) {
		len = min(seglen, left);
		frag_left = seglen - len;
		desc_addr = ionic_tx_map_single(q, skb->data + offset, len);
		if (dma_mapping_error(dev, desc_addr))
			goto err_out_abort;
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (nfrags > 0 && frag_left > 0)
			continue;
		done = (nfrags == 0 && left == 0);
		ionic_tx_tso_post(q, desc, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss, outer_csum,
				  vlan_tci, has_vlan,
				  start, done);
		total_pkts++;
		total_bytes += start ? len : len + hdrlen;
		desc = ionic_tx_tso_next(q, &elem);
		start = false;
		seglen = mss;
	}

	/* Chop skb frags into desc segments */

	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		offset = 0;
		left = skb_frag_size(frag);
		len_left -= left;
		nfrags--;
		stats->frags++;

		while (left > 0) {
			if (frag_left > 0) {
				len = min(frag_left, left);
				frag_left -= len;
				elem->addr =
				    cpu_to_le64(ionic_tx_map_frag(q, frag,
								  offset, len));
				if (dma_mapping_error(dev, elem->addr))
					goto err_out_abort;
				elem->len = cpu_to_le16(len);
				elem++;
				desc_nsge++;
				left -= len;
				offset += len;
				if (nfrags > 0 && frag_left > 0)
					continue;
				done = (nfrags == 0 && left == 0);
				ionic_tx_tso_post(q, desc, skb, desc_addr,
						  desc_nsge, desc_len,
						  hdrlen, mss, outer_csum,
						  vlan_tci, has_vlan,
						  start, done);
				total_pkts++;
				total_bytes += start ? len : len + hdrlen;
				desc = ionic_tx_tso_next(q, &elem);
				start = false;
			} else {
				len = min(mss, left);
				frag_left = mss - len;
				desc_addr = ionic_tx_map_frag(q, frag,
							      offset, len);
				if (dma_mapping_error(dev, desc_addr))
					goto err_out_abort;
				desc_len = len;
				desc_nsge = 0;
				left -= len;
				offset += len;
				if (nfrags > 0 && frag_left > 0)
					continue;
				done = (nfrags == 0 && left == 0);
				ionic_tx_tso_post(q, desc, skb, desc_addr,
						  desc_nsge, desc_len,
						  hdrlen, mss, outer_csum,
						  vlan_tci, has_vlan,
						  start, done);
				total_pkts++;
				total_bytes += start ? len : len + hdrlen;
				desc = ionic_tx_tso_next(q, &elem);
				start = false;
			}
		}
	}

	stats->pkts += total_pkts;
	stats->bytes += total_bytes;
	stats->tso++;
	stats->tso_bytes += total_bytes;

	return 0;

err_out_abort:
	while (rewind != q->head_idx) {
		rewind_desc_info = &q->info[rewind];
		ionic_tx_clean(q, rewind_desc_info, NULL, NULL);
		rewind = (rewind + 1) & (q->num_descs - 1);
	}
	q->head_idx = abort;

	return -ENOMEM;
}

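/* Non-TSO initial descriptors: either ask the device for partial
 * checksum offload using csum_start/csum_offset, or send the packet
 * with no checksum assistance.
 */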
static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(skb_headlen(skb));
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	}

	if (skb->csum_not_inet)
		stats->crc32_csum++;
	else
		stats->csum++;

	return 0;
}

static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(skb_headlen(skb));
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	}

	stats->csum_none++;

	return 0;
}

static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
	unsigned int len_left = skb->len - skb_headlen(skb);
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	skb_frag_t *frag;
	u16 len;

	for (frag = skb_shinfo(skb)->frags; len_left; frag++, elem++) {
		len = skb_frag_size(frag);
		elem->len = cpu_to_le16(len);
		dma_addr = ionic_tx_map_frag(q, frag, 0, len);
		if (dma_mapping_error(dev, dma_addr))
			return -ENOMEM;
		elem->addr = cpu_to_le64(dma_addr);
		len_left -= len;
		stats->frags++;
	}

	return 0;
}

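/* Post one packet: set up the initial descriptor and any SG frags,
 * then hand it to hardware, deferring the doorbell when the stack
 * signals more packets are on the way (xmit_more).
 */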
static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

	/* set up the initial descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = ionic_tx_calc_csum(q, skb);
	else
		err = ionic_tx_calc_no_csum(q, skb);
	if (err)
		return err;

	/* add frags */
	err = ionic_tx_skb_frags(q, skb);
	if (err)
		return err;

	skb_tx_timestamp(skb);
	stats->pkts++;
	stats->bytes += skb->len;

	netdev_tx_sent_queue(q_to_ndq(q), skb->len);
	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);

	return 0;
}

static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
	int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

	/* If TSO, need roundup(skb->len/mss) descs */
	if (skb_is_gso(skb))
		return (skb->len / skb_shinfo(skb)->gso_size) + 1;

	/* If non-TSO, just need 1 desc and nr_frags sg elems */
	if (skb_shinfo(skb)->nr_frags <= sg_elems)
		return 1;

	/* Too many frags, so linearize */
	err = skb_linearize(skb);
	if (err)
		return err;

	stats->linearize++;

	/* Need 1 desc and zero sg elems */
	return 1;
}

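/* Flow control: stop the subqueue when descriptor space runs out and
 * re-check after the barrier in case ionic_tx_clean() just freed some.
 */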
static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
{
	int stopped = 0;

	if (unlikely(!ionic_q_has_space(q, ndescs))) {
		netif_stop_subqueue(q->lif->netdev, q->index);
		q->stop++;
		stopped = 1;

		/* Might race with ionic_tx_clean, check again */
		smp_rmb();
		if (ionic_q_has_space(q, ndescs)) {
			netif_wake_subqueue(q->lif->netdev, q->index);
			stopped = 0;
		}
	}

	return stopped;
}

netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(queue_index >= lif->nxqs))
		queue_index = 0;
	q = &lif->txqcqs[queue_index]->q;

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	/* Stop the queue if there aren't descriptors for the next packet.
	 * Since our SG lists per descriptor take care of most of the possible
	 * fragmentation, we don't need to have many descriptors available.
	 */
	ionic_maybe_stop_tx(q, 4);

	return NETDEV_TX_OK;

err_out_drop:
	q->stop++;
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}