// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_dev.h"
#include "hinic_tx.h"
#define TX_IRQ_NO_PENDING               0
#define TX_IRQ_NO_COALESC               0
#define TX_IRQ_NO_LLI_TIMER             0
#define TX_IRQ_NO_CREDIT                0
#define TX_IRQ_NO_RESEND_TIMER          0

#define CI_UPDATE_NO_PENDING            0
#define CI_UPDATE_NO_COALESC            0

#define HW_CONS_IDX(sq)                 be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))

#define MIN_SKB_LEN                     32

#define MAX_PAYLOAD_OFFSET              221
#define TRANSPORT_OFFSET(l4_hdr, skb)   ((u32)((l4_hdr) - (skb)->data))
union hinic_l3 {
	struct iphdr *v4;
	struct ipv6hdr *v6;
	unsigned char *hdr;
};

union hinic_l4 {
	struct tcphdr *tcp;
	struct udphdr *udp;
	unsigned char *hdr;
};

enum hinic_offload_type {
	TX_OFFLOAD_TSO     = BIT(0),
	TX_OFFLOAD_CSUM    = BIT(1),
	TX_OFFLOAD_VLAN    = BIT(2),
	TX_OFFLOAD_INVALID = BIT(3),
};
/**
 * hinic_txq_clean_stats - Clean the statistics of a specific queue
 * @txq: Logical Tx Queue
 **/
void hinic_txq_clean_stats(struct hinic_txq *txq)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;

	u64_stats_update_begin(&txq_stats->syncp);
	txq_stats->pkts    = 0;
	txq_stats->bytes   = 0;
	txq_stats->tx_busy = 0;
	txq_stats->tx_wake = 0;
	txq_stats->tx_dropped = 0;
	txq_stats->big_frags_pkts = 0;
	u64_stats_update_end(&txq_stats->syncp);
}
/**
 * hinic_txq_get_stats - get statistics of Tx Queue
 * @txq: Logical Tx Queue
 * @stats: return updated stats here
 **/
void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;
	unsigned int start;

	u64_stats_update_begin(&stats->syncp);
	do {
		start = u64_stats_fetch_begin(&txq_stats->syncp);
		stats->pkts    = txq_stats->pkts;
		stats->bytes   = txq_stats->bytes;
		stats->tx_busy = txq_stats->tx_busy;
		stats->tx_wake = txq_stats->tx_wake;
		stats->tx_dropped = txq_stats->tx_dropped;
		stats->big_frags_pkts = txq_stats->big_frags_pkts;
	} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
	u64_stats_update_end(&stats->syncp);
}
/**
 * txq_stats_init - Initialize the statistics of a specific queue
 * @txq: Logical Tx Queue
 **/
static void txq_stats_init(struct hinic_txq *txq)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;

	u64_stats_init(&txq_stats->syncp);
	hinic_txq_clean_stats(txq);
}
/**
 * tx_map_skb - dma mapping for skb and return sges
 * @nic_dev: nic device
 * @skb: the skb to map
 * @sges: returned sges
 *
 * Return 0 - Success, negative - Failure
 **/
static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
		      struct hinic_sge *sges)
{
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	skb_frag_t *frag;
	dma_addr_t dma_addr;
	int i, j;

	dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma_addr)) {
		dev_err(&pdev->dev, "Failed to map Tx skb data\n");
		return -EFAULT;
	}

	hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb));

	for (i = 0 ; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, dma_addr)) {
			dev_err(&pdev->dev, "Failed to map Tx skb frag\n");
			goto err_tx_map;
		}

		hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag));
	}

	return 0;

err_tx_map:
	for (j = 0; j < i; j++)
		dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 1]),
			       sges[j + 1].len, DMA_TO_DEVICE);
	dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
			 DMA_TO_DEVICE);
	return -EFAULT;
}
/**
 * tx_unmap_skb - unmap the dma address of the skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
			 struct hinic_sge *sges)
{
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags ; i++)
		dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]),
			       sges[i + 1].len, DMA_TO_DEVICE);

	dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
			 DMA_TO_DEVICE);
}
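/* Determine the (inner) L3 type and L4 protocol for the offload task.
 * For IPv6, extension headers are skipped so that *l4_proto ends up holding
 * the real transport protocol.
 */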
static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_l3 *ip,
				 union hinic_l4 *l4,
				 enum hinic_offload_type offload_type,
				 enum hinic_l3_offload_type *l3_type,
				 u8 *l4_proto)
{
	u8 *exthdr;

	if (ip->v4->version == 4) {
		*l3_type = (offload_type == TX_OFFLOAD_CSUM) ?
			   IPV4_PKT_NO_CHKSUM_OFFLOAD :
			   IPV4_PKT_WITH_CHKSUM_OFFLOAD;
		*l4_proto = ip->v4->protocol;
	} else if (ip->v4->version == 6) {
		*l3_type = IPV6_PKT;
		exthdr = ip->hdr + sizeof(*ip->v6);
		*l4_proto = ip->v6->nexthdr;
		if (exthdr != l4->hdr) {
			int start = exthdr - skb->data;
			__be16 frag_off;

			ipv6_skip_exthdr(skb, start, l4_proto, &frag_off);
		}
	} else {
		*l3_type = L3TYPE_UNKNOWN;
		*l4_proto = 0;
	}
}
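/* Fill in the L4 offload type, L4 header length and payload offset for the
 * supported transport protocols (TCP, UDP, and SCTP for checksum offload
 * only); any other protocol leaves the offload disabled.
 */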
static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
			      enum hinic_offload_type offload_type, u8 l4_proto,
			      enum hinic_l4_offload_type *l4_offload,
			      u32 *l4_len, u32 *offset)
{
	*l4_offload = OFFLOAD_DISABLE;
	*offset = 0;
	*l4_len = 0;

	switch (l4_proto) {
	case IPPROTO_TCP:
		*l4_offload = TCP_OFFLOAD_ENABLE;
		/* doff in unit of 4B */
		*l4_len = l4->tcp->doff * 4;
		*offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb);
		break;

	case IPPROTO_UDP:
		*l4_offload = UDP_OFFLOAD_ENABLE;
		*l4_len = sizeof(struct udphdr);
		*offset = TRANSPORT_OFFSET(l4->hdr, skb);
		break;

	case IPPROTO_SCTP:
		/* only csum offload supports sctp */
		if (offload_type != TX_OFFLOAD_CSUM)
			break;

		*l4_offload = SCTP_OFFLOAD_ENABLE;
		*l4_len = sizeof(struct sctphdr);
		*offset = TRANSPORT_OFFSET(l4->hdr, skb);
		break;

	default:
		break;
	}
}
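/* Compute the IPv4 or IPv6 pseudo-header checksum, used to seed the TCP/UDP
 * checksum field before the hardware completes it.
 */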
static __sum16 csum_magic(union hinic_l3 *ip, unsigned short proto)
{
	return (ip->v4->version == 4) ?
	       csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
	       csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
}
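/* Set up the TSO fields of the send queue task descriptor.
 * Returns 1 when TSO is enabled for this skb, 0 when the skb is not GSO, and
 * a negative error when the headers cannot be prepared.
 */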
static int offload_tso(struct hinic_sq_task *task, u32 *queue_info,
		       struct sk_buff *skb)
{
	u32 offset, l4_len, ip_identify, network_hdr_len;
	enum hinic_l3_offload_type l3_offload;
	enum hinic_l4_offload_type l4_offload;
	union hinic_l3 ip;
	union hinic_l4 l4;
	u8 l4_proto;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_cow_head(skb, 0) < 0)
		return -EPROTONOSUPPORT;

	if (skb->encapsulation) {
		u32 gso_type = skb_shinfo(skb)->gso_type;
		u32 tunnel_type = 0;
		u32 l4_tunnel_len;

		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
		network_hdr_len = skb_inner_network_header_len(skb);

		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			l3_offload = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
		} else if (ip.v4->version == 6) {
			l3_offload = IPV6_PKT;
		} else {
			l3_offload = 0;
		}

		hinic_task_set_outter_l3(task, l3_offload,
					 skb_network_header_len(skb));

		if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
			l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
			tunnel_type = TUNNEL_UDP_CSUM;
		} else if (gso_type & SKB_GSO_UDP_TUNNEL) {
			tunnel_type = TUNNEL_UDP_NO_CSUM;
		}

		l4_tunnel_len = skb_inner_network_offset(skb) -
				skb_transport_offset(skb);
		hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);

		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);
	} else {
		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
		network_hdr_len = skb_network_header_len(skb);
	}

	/* initialize inner IP header fields */
	if (ip.v4->version == 4)
		ip.v4->tot_len = 0;
	else
		ip.v6->payload_len = 0;

	get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, &l3_offload,
			     &l4_proto);

	hinic_task_set_inner_l3(task, l3_offload, network_hdr_len);

	ip_identify = 0;
	if (l4_proto == IPPROTO_TCP)
		l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);

	get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, &l4_offload,
			  &l4_len, &offset);

	hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, offset,
			       ip_identify, skb_shinfo(skb)->gso_size);

	return 1;
}
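/* Set up checksum offload in the task descriptor for CHECKSUM_PARTIAL skbs.
 * Returns 1 when checksum offload is enabled and 0 when nothing was done.
 */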
static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
			struct sk_buff *skb)
{
	enum hinic_l4_offload_type l4_offload;
	u32 offset, l4_len, network_hdr_len;
	enum hinic_l3_offload_type l3_type;
	union hinic_l3 ip;
	union hinic_l4 l4;
	u8 l4_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->encapsulation) {
		u32 l4_tunnel_len;

		ip.hdr = skb_network_header(skb);

		if (ip.v4->version == 4)
			l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
		else if (ip.v4->version == 6)
			l3_type = IPV6_PKT;
		else
			l3_type = L3TYPE_UNKNOWN;

		hinic_task_set_outter_l3(task, l3_type,
					 skb_network_header_len(skb));

		l4_tunnel_len = skb_inner_network_offset(skb) -
				skb_transport_offset(skb);

		hinic_task_set_tunnel_l4(task, TUNNEL_UDP_NO_CSUM,
					 l4_tunnel_len);

		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);
		network_hdr_len = skb_inner_network_header_len(skb);
	} else {
		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
		network_hdr_len = skb_network_header_len(skb);
	}

	get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM, &l3_type,
			     &l4_proto);

	hinic_task_set_inner_l3(task, l3_type, network_hdr_len);

	get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto, &l4_offload,
			  &l4_len, &offset);

	hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset);

	return 1;
}
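/* Insert the VLAN tag into the task descriptor and the frame priority into
 * the queue info word.
 */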
static void offload_vlan(struct hinic_sq_task *task, u32 *queue_info,
			 u16 vlan_tag, u16 vlan_pri)
{
	task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) |
			   HINIC_SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD);

	*queue_info |= HINIC_SQ_CTRL_SET(vlan_pri, QUEUE_INFO_PRI);
}
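/* Apply TSO, checksum and VLAN offloads to the WQE task section and validate
 * the payload offset and MSS limits required by the hardware.
 */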
static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
			    u32 *queue_info)
{
	enum hinic_offload_type offload = 0;
	u16 vlan_tag;
	int enabled;

	enabled = offload_tso(task, queue_info, skb);
	if (enabled > 0) {
		offload |= TX_OFFLOAD_TSO;
	} else if (enabled == 0) {
		enabled = offload_csum(task, queue_info, skb);
		if (enabled)
			offload |= TX_OFFLOAD_CSUM;
	} else {
		return -EPROTONOSUPPORT;
	}

	if (unlikely(skb_vlan_tag_present(skb))) {
		vlan_tag = skb_vlan_tag_get(skb);
		offload_vlan(task, queue_info, vlan_tag,
			     vlan_tag >> VLAN_PRIO_SHIFT);
		offload |= TX_OFFLOAD_VLAN;
	}

	if (offload)
		hinic_task_set_l2hdr(task, skb_network_offset(skb));

	/* payload offset must not be more than 221 */
	if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_PLDOFF) >
	    MAX_PAYLOAD_OFFSET) {
		return -EPROTONOSUPPORT;
	}

	/* mss must not be less than 80 */
	if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_MSS) < HINIC_MSS_MIN) {
		*queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
		*queue_info |= HINIC_SQ_CTRL_SET(HINIC_MSS_MIN, QUEUE_INFO_MSS);
	}

	return 0;
}
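/**
 * hinic_lb_xmit_frame - transmit an skb on the loopback (lb) test path
 * @skb: skb to transmit
 * @netdev: the netdev associated with the tx queue
 *
 * Same flow as hinic_xmit_frame() but without the padding and offload steps.
 *
 * Return NETDEV_TX_OK or NETDEV_TX_BUSY
 **/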
netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 prod_idx, q_id = skb->queue_mapping;
	struct netdev_queue *netdev_txq;
	int nr_sges, err = NETDEV_TX_OK;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct hinic_txq *txq;
	struct hinic_qp *qp;

	txq = &nic_dev->txqs[q_id];
	qp = container_of(txq->sq, struct hinic_qp, sq);
	nr_sges = skb_shinfo(skb)->nr_frags + 1;

	err = tx_map_skb(nic_dev, skb, txq->sges);
	if (err)
		goto skb_error;

	wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);

	sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
	if (!sq_wqe) {
		netif_stop_subqueue(netdev, qp->q_id);

		sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
		if (sq_wqe) {
			netif_wake_subqueue(nic_dev->netdev, qp->q_id);
			goto process_sq_wqe;
		}

		tx_unmap_skb(nic_dev, skb, txq->sges);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_busy++;
		u64_stats_update_end(&txq->txq_stats.syncp);
		err = NETDEV_TX_BUSY;
		wqe_size = 0;
		goto flush_skbs;
	}

process_sq_wqe:
	hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
	hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);

flush_skbs:
	netdev_txq = netdev_get_tx_queue(netdev, q_id);
	if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
		hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);

	return err;

skb_error:
	dev_kfree_skb_any(skb);
	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.tx_dropped++;
	u64_stats_update_end(&txq->txq_stats.syncp);

	return NETDEV_TX_OK;
}
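/**
 * hinic_xmit_frame - ndo_start_xmit handler, post an skb to the send queue
 * @skb: skb to transmit
 * @netdev: the netdev associated with the tx queue
 *
 * Return NETDEV_TX_OK or NETDEV_TX_BUSY
 **/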
netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 prod_idx, q_id = skb->queue_mapping;
	struct netdev_queue *netdev_txq;
	int nr_sges, err = NETDEV_TX_OK;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct hinic_txq *txq;
	struct hinic_qp *qp;

	txq = &nic_dev->txqs[q_id];
	qp = container_of(txq->sq, struct hinic_qp, sq);

	if (skb->len < MIN_SKB_LEN) {
		if (skb_pad(skb, MIN_SKB_LEN - skb->len)) {
			netdev_err(netdev, "Failed to pad skb\n");
			goto update_error_stats;
		}

		skb->len = MIN_SKB_LEN;
	}

	nr_sges = skb_shinfo(skb)->nr_frags + 1;
	if (nr_sges > 17) {
		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.big_frags_pkts++;
		u64_stats_update_end(&txq->txq_stats.syncp);
	}

	if (nr_sges > txq->max_sges) {
		netdev_err(netdev, "Too many Tx sges\n");
		goto skb_error;
	}

	err = tx_map_skb(nic_dev, skb, txq->sges);
	if (err)
		goto skb_error;

	wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);

	sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
	if (!sq_wqe) {
		netif_stop_subqueue(netdev, qp->q_id);

		/* Check for the case that free_tx_poll is called on another
		 * cpu and we stopped the subqueue after its check.
		 */
		sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
		if (sq_wqe) {
			netif_wake_subqueue(nic_dev->netdev, qp->q_id);
			goto process_sq_wqe;
		}

		tx_unmap_skb(nic_dev, skb, txq->sges);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_busy++;
		u64_stats_update_end(&txq->txq_stats.syncp);
		err = NETDEV_TX_BUSY;
		wqe_size = 0;
		goto flush_skbs;
	}

process_sq_wqe:
	hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);

	err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
	if (err)
		goto offload_error;

	hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);

flush_skbs:
	netdev_txq = netdev_get_tx_queue(netdev, q_id);
	if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
		hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);

	return err;

offload_error:
	hinic_sq_return_wqe(txq->sq, wqe_size);
	tx_unmap_skb(nic_dev, skb, txq->sges);

skb_error:
	dev_kfree_skb_any(skb);

update_error_stats:
	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.tx_dropped++;
	u64_stats_update_end(&txq->txq_stats.syncp);

	return NETDEV_TX_OK;
}
/**
 * tx_free_skb - unmap and free skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
			struct hinic_sge *sges)
{
	tx_unmap_skb(nic_dev, skb, sges);

	dev_kfree_skb_any(skb);
}
/**
 * free_all_tx_skbs - free all skbs in tx queue
 * @txq: tx queue
 **/
static void free_all_tx_skbs(struct hinic_txq *txq)
{
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct hinic_sq *sq = txq->sq;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct sk_buff *skb;
	int nr_sges;
	u16 ci;

	while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) {
		sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci);
		if (!sq_wqe)
			break;

		nr_sges = skb_shinfo(skb)->nr_frags + 1;

		hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

		hinic_sq_put_wqe(sq, wqe_size);

		tx_free_skb(nic_dev, skb, txq->free_sges);
	}
}
/**
 * free_tx_poll - free finished tx skbs in the tx queue that is connected to napi
 * @napi: napi
 * @budget: maximum number of skbs to free in one poll
 *
 * Return the number of reclaimed packets, or budget if more work is pending
 **/
static int free_tx_poll(struct napi_struct *napi, int budget)
{
	struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi);
	struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq);
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct netdev_queue *netdev_txq;
	struct hinic_sq *sq = txq->sq;
	struct hinic_wq *wq = sq->wq;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	int nr_sges, pkts = 0;
	struct sk_buff *skb;
	u64 tx_bytes = 0;
	u16 hw_ci, sw_ci;

	do {
		hw_ci = HW_CONS_IDX(sq) & wq->mask;

		dma_rmb();

		/* Reading a WQEBB to get real WQE size and consumer index. */
		sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
		if (!sq_wqe ||
		    (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
			break;

		/* If this WQE has multiple WQEBBs, read again to get the
		 * full-size WQE.
		 */
		if (wqe_size > wq->wqebb_size) {
			sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci);
			if (unlikely(!sq_wqe))
				break;
		}

		tx_bytes += skb->len;
		pkts++;

		nr_sges = skb_shinfo(skb)->nr_frags + 1;

		hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

		hinic_sq_put_wqe(sq, wqe_size);

		tx_free_skb(nic_dev, skb, txq->free_sges);
	} while (pkts < budget);

	if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) &&
	    hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) {
		netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);

		__netif_tx_lock(netdev_txq, smp_processor_id());
		if (!netif_testing(nic_dev->netdev))
			netif_wake_subqueue(nic_dev->netdev, qp->q_id);

		__netif_tx_unlock(netdev_txq);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_wake++;
		u64_stats_update_end(&txq->txq_stats.syncp);
	}

	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.bytes += tx_bytes;
	txq->txq_stats.pkts += pkts;
	u64_stats_update_end(&txq->txq_stats.syncp);

	if (pkts < budget) {
		napi_complete(napi);
		if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
			hinic_hwdev_set_msix_state(nic_dev->hwdev,
						   sq->msix_entry,
						   HINIC_MSIX_ENABLE);
		return pkts;
	}

	return budget;
}
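/* Register/enable and disable/unregister the NAPI context that reclaims
 * completed tx WQEs for this queue.
 */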
static void tx_napi_add(struct hinic_txq *txq, int weight)
{
	netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, weight);
	napi_enable(&txq->napi);
}

static void tx_napi_del(struct hinic_txq *txq)
{
	napi_disable(&txq->napi);
	netif_napi_del(&txq->napi);
}
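/* Tx interrupt handler: mask the MSI-X vector (PF only) and let NAPI reclaim
 * the completed WQEs.
 */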
static irqreturn_t tx_irq(int irq, void *data)
{
	struct hinic_txq *txq = data;
	struct hinic_dev *nic_dev;

	nic_dev = netdev_priv(txq->netdev);

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		/* Disable the interrupt until napi is complete */
		hinic_hwdev_set_msix_state(nic_dev->hwdev,
					   txq->sq->msix_entry,
					   HINIC_MSIX_DISABLE);

	hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);

	napi_schedule(&txq->napi);
	return IRQ_HANDLED;
}
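/* Set up NAPI, program the MSI-X coalescing parameters for the send queue,
 * and request its interrupt.
 */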
static int tx_request_irq(struct hinic_txq *txq)
{
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct hinic_msix_config interrupt_info = {0};
	struct hinic_intr_coal_info *intr_coal = NULL;
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_sq *sq = txq->sq;
	struct hinic_qp *qp;
	int err;

	qp = container_of(sq, struct hinic_qp, sq);

	tx_napi_add(txq, nic_dev->tx_weight);

	hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
			     TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
			     TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT,
			     TX_IRQ_NO_RESEND_TIMER);

	intr_coal = &nic_dev->tx_intr_coalesce[qp->q_id];
	interrupt_info.msix_index = sq->msix_entry;
	interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
	interrupt_info.pending_cnt = intr_coal->pending_limt;
	interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;

	err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
	if (err) {
		netif_err(nic_dev, drv, txq->netdev,
			  "Failed to set TX interrupt coalescing attribute\n");
		tx_napi_del(txq);
		return err;
	}

	err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
	if (err) {
		dev_err(&pdev->dev, "Failed to request Tx irq\n");
		tx_napi_del(txq);
		return err;
	}

	return 0;
}

static void tx_free_irq(struct hinic_txq *txq)
{
	struct hinic_sq *sq = txq->sq;

	free_irq(sq->irq, txq);
	tx_napi_del(txq);
}
/**
 * hinic_init_txq - Initialize the Tx Queue
 * @txq: Logical Tx Queue
 * @sq: Hardware Tx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
		   struct net_device *netdev)
{
	struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	int err, irqname_len;
	size_t sges_size;

	txq->netdev = netdev;
	txq->sq = sq;

	txq_stats_init(txq);

	txq->max_sges = HINIC_MAX_SQ_BUFDESCS;

	sges_size = txq->max_sges * sizeof(*txq->sges);
	txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
	if (!txq->sges)
		return -ENOMEM;

	sges_size = txq->max_sges * sizeof(*txq->free_sges);
	txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
	if (!txq->free_sges) {
		err = -ENOMEM;
		goto err_alloc_free_sges;
	}

	irqname_len = snprintf(NULL, 0, "hinic_txq%d", qp->q_id) + 1;
	txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
	if (!txq->irq_name) {
		err = -ENOMEM;
		goto err_alloc_irqname;
	}

	sprintf(txq->irq_name, "hinic_txq%d", qp->q_id);

	err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
					 CI_UPDATE_NO_COALESC);
	if (err)
		goto err_hw_ci;

	err = tx_request_irq(txq);
	if (err) {
		netdev_err(netdev, "Failed to request Tx irq\n");
		goto err_req_tx_irq;
	}

	return 0;

err_req_tx_irq:
err_hw_ci:
	devm_kfree(&netdev->dev, txq->irq_name);

err_alloc_irqname:
	devm_kfree(&netdev->dev, txq->free_sges);

err_alloc_free_sges:
	devm_kfree(&netdev->dev, txq->sges);
	return err;
}
/**
 * hinic_clean_txq - Clean the Tx Queue
 * @txq: Logical Tx Queue
 **/
void hinic_clean_txq(struct hinic_txq *txq)
{
	struct net_device *netdev = txq->netdev;

	tx_free_irq(txq);

	free_all_tx_skbs(txq);

	devm_kfree(&netdev->dev, txq->irq_name);
	devm_kfree(&netdev->dev, txq->free_sges);
	devm_kfree(&netdev->dev, txq->sges);
}