// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include <linux/cpumask.h>
#include <linux/if_vlan.h>
#include <asm/barrier.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_rx.h"
#include "hinic_dev.h"

#define RX_IRQ_NO_PENDING               0
#define RX_IRQ_NO_COALESC               0
#define RX_IRQ_NO_LLI_TIMER             0
#define RX_IRQ_NO_CREDIT                0
#define RX_IRQ_NO_RESEND_TIMER          0
#define HINIC_RX_BUFFER_WRITE           16

#define HINIC_RX_IPV6_PKT               7
#define LRO_PKT_HDR_LEN_IPV4            66
#define LRO_PKT_HDR_LEN_IPV6            86
#define LRO_REPLENISH_THLD              256
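
/* An LRO-coalesced frame carries the protocol headers only once; each
 * additional merged segment is accounted an extra header's worth of wire
 * bytes: 66 for IPv4 or 86 for IPv6.
 */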
#define LRO_PKT_HDR_LEN(cqe) \
        (HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \
         HINIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4)

/**
 * hinic_rxq_clean_stats - Clean the statistics of a specific queue
 * @rxq: Logical Rx Queue
 **/
void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
{
        struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

        u64_stats_update_begin(&rxq_stats->syncp);
        rxq_stats->pkts = 0;
        rxq_stats->bytes = 0;
        rxq_stats->errors = 0;
        rxq_stats->csum_errors = 0;
        rxq_stats->other_errors = 0;
        u64_stats_update_end(&rxq_stats->syncp);
}

/**
 * hinic_rxq_get_stats - get statistics of Rx Queue
 * @rxq: Logical Rx Queue
 * @stats: return updated stats here
 **/
void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
{
        struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
        unsigned int start;

        u64_stats_update_begin(&stats->syncp);
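        /* Snapshot the counters under the writer's seqcount and retry if the
         * NAPI path updated them mid-read; this keeps the 64-bit reads
         * tear-free on 32-bit kernels.
         */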
        do {
                start = u64_stats_fetch_begin(&rxq_stats->syncp);
                stats->pkts = rxq_stats->pkts;
                stats->bytes = rxq_stats->bytes;
                stats->errors = rxq_stats->csum_errors +
                                rxq_stats->other_errors;
                stats->csum_errors = rxq_stats->csum_errors;
                stats->other_errors = rxq_stats->other_errors;
        } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
        u64_stats_update_end(&stats->syncp);
}

/**
 * rxq_stats_init - Initialize the statistics of a specific queue
 * @rxq: Logical Rx Queue
 **/
static void rxq_stats_init(struct hinic_rxq *rxq)
{
        struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

        u64_stats_init(&rxq_stats->syncp);
        hinic_rxq_clean_stats(rxq);
}

static void rx_csum(struct hinic_rxq *rxq, u32 status,
                    struct sk_buff *skb)
{
        struct net_device *netdev = rxq->netdev;
        u32 csum_err;

        csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR);

        if (!(netdev->features & NETIF_F_RXCSUM))
                return;

        if (!csum_err) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else {
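                /* HW_CHECK_NONE and IPSU_OTHER_ERR mean the hardware did not
                 * actually verify the checksum, so only genuine verification
                 * failures are counted as csum_errors.
                 */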
                if (!(csum_err & (HINIC_RX_CSUM_HW_CHECK_NONE |
                                  HINIC_RX_CSUM_IPSU_OTHER_ERR)))
                        rxq->rxq_stats.csum_errors++;
                skb->ip_summed = CHECKSUM_NONE;
        }
}

/**
 * rx_alloc_skb - allocate skb and map it to dma address
 * @rxq: rx queue
 * @dma_addr: returned dma address for the skb
 *
 * Return skb on success, NULL on failure
 **/
static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq,
                                    dma_addr_t *dma_addr)
{
        struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_hwif *hwif = hwdev->hwif;
        struct pci_dev *pdev = hwif->pdev;
        struct sk_buff *skb;
        dma_addr_t addr;
        int err;

        skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
        if (!skb) {
                netdev_err(rxq->netdev, "Failed to allocate Rx SKB\n");
                return NULL;
        }

        addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
                              DMA_FROM_DEVICE);
        err = dma_mapping_error(&pdev->dev, addr);
        if (err) {
                dev_err(&pdev->dev, "Failed to map Rx DMA, err = %d\n", err);
                goto err_rx_map;
        }

        *dma_addr = addr;
        return skb;

err_rx_map:
        dev_kfree_skb_any(skb);
        return NULL;
}

/**
 * rx_unmap_skb - unmap the dma address of the skb
 * @rxq: rx queue
 * @dma_addr: dma address of the skb
 **/
static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr)
{
        struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_hwif *hwif = hwdev->hwif;
        struct pci_dev *pdev = hwif->pdev;

        dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz,
                         DMA_FROM_DEVICE);
}

/**
 * rx_free_skb - unmap and free skb
 * @rxq: rx queue
 * @skb: skb to free
 * @dma_addr: dma address of the skb
 **/
static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb,
                        dma_addr_t dma_addr)
{
        rx_unmap_skb(rxq, dma_addr);
        dev_kfree_skb_any(skb);
}

/**
 * rx_alloc_pkts - allocate pkts in rx queue
 * @rxq: rx queue
 *
 * Return number of skbs allocated
 **/
static int rx_alloc_pkts(struct hinic_rxq *rxq)
{
        struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
        struct hinic_rq_wqe *rq_wqe;
        unsigned int free_wqebbs;
        struct hinic_sge sge;
        dma_addr_t dma_addr;
        struct sk_buff *skb;
        u16 prod_idx;
        int i;

        free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);

        /* Limit the allocation chunks */
        if (free_wqebbs > nic_dev->rx_weight)
                free_wqebbs = nic_dev->rx_weight;

        for (i = 0; i < free_wqebbs; i++) {
                skb = rx_alloc_skb(rxq, &dma_addr);
                if (!skb) {
                        netdev_err(rxq->netdev, "Failed to alloc Rx skb\n");
                        goto skb_out;
                }

                hinic_set_sge(&sge, dma_addr, skb->len);

                rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
                                          &prod_idx);
                if (!rq_wqe) {
                        rx_free_skb(rxq, skb, dma_addr);
                        goto skb_out;
                }

                hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);

                hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
        }

skb_out:
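        /* The ring's producer index is published once for the whole batch;
         * the barrier below orders the WQE writes ahead of the PI update
         * that makes them visible to the hardware.
         */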
        if (i) {
                wmb();  /* write all the wqes before update PI */

                hinic_rq_update(rxq->rq, prod_idx);
        }

        return i;
}

/**
 * free_all_rx_skbs - free all skbs in rx queue
 * @rxq: rx queue
 **/
static void free_all_rx_skbs(struct hinic_rxq *rxq)
{
        struct hinic_rq *rq = rxq->rq;
        struct hinic_hw_wqe *hw_wqe;
        struct hinic_sge sge;
        u16 ci;

        while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) {
                if (IS_ERR(hw_wqe))
                        break;

                hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge);

                hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE);

                rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge));
        }
}

/**
 * rx_recv_jumbo_pkt - Rx handler for jumbo pkt
 * @rxq: rx queue
 * @head_skb: the first skb in the list
 * @left_pkt_len: remaining size of the pkt, excluding the head skb
 * @ci: consumer index
 *
 * Return number of wqes used for the remainder of the pkt
 **/
static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb,
                             unsigned int left_pkt_len, u16 ci)
{
        struct sk_buff *skb, *curr_skb = head_skb;
        struct hinic_rq_wqe *rq_wqe;
        unsigned int curr_len;
        struct hinic_sge sge;
        int num_wqes = 0;

        while (left_pkt_len > 0) {
                rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
                                                &skb, &ci);

                num_wqes++;

                hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

                rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

                prefetch(skb->data);

                curr_len = (left_pkt_len > HINIC_RX_BUF_SZ) ? HINIC_RX_BUF_SZ :
                            left_pkt_len;

                left_pkt_len -= curr_len;

                __skb_put(skb, curr_len);
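
                /* Chain the continuation buffers behind the head skb: the
                 * first one hangs off frag_list, later ones are linked via
                 * ->next, and the head's totals are updated by hand so the
                 * stack sees a single logical packet.
                 */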
                if (curr_skb == head_skb)
                        skb_shinfo(head_skb)->frag_list = skb;
                else
                        curr_skb->next = skb;

                head_skb->len += skb->len;
                head_skb->data_len += skb->len;
                head_skb->truesize += skb->truesize;

                curr_skb = skb;
        }

        return num_wqes;
}
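
/* Loopback self-test support: received test frames are copied into
 * nic_dev->lb_test_rx_buf so the test logic can compare them against the
 * transmitted pattern.
 */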
static void hinic_copy_lp_data(struct hinic_dev *nic_dev,
                               struct sk_buff *skb)
{
        struct net_device *netdev = nic_dev->netdev;
        u8 *lb_buf = nic_dev->lb_test_rx_buf;
        int lb_len = nic_dev->lb_pkt_len;
        int pkt_offset, frag_len, i;
        void *frag_data = NULL;

        if (nic_dev->lb_test_rx_idx == LP_PKT_CNT) {
                nic_dev->lb_test_rx_idx = 0;
                netif_warn(nic_dev, drv, netdev, "Loopback test warning, received too many test pkts\n");
        }

        if (skb->len != nic_dev->lb_pkt_len) {
                netif_warn(nic_dev, drv, netdev, "Wrong packet length\n");
                nic_dev->lb_test_rx_idx++;
                return;
        }

        pkt_offset = nic_dev->lb_test_rx_idx * lb_len;
        frag_len = (int)skb_headlen(skb);
        memcpy(lb_buf + pkt_offset, skb->data, frag_len);

        pkt_offset += frag_len;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]);
                frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]);
                memcpy(lb_buf + pkt_offset, frag_data, frag_len);
                pkt_offset += frag_len;
        }

        nic_dev->lb_test_rx_idx++;
}

/**
 * rxq_recv - Rx handler
 * @rxq: rx queue
 * @budget: maximum pkts to process
 *
 * Return number of pkts received
 **/
static int rxq_recv(struct hinic_rxq *rxq, int budget)
{
        struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
        struct net_device *netdev = rxq->netdev;
        u64 pkt_len = 0, rx_bytes = 0;
        struct hinic_rq *rq = rxq->rq;
        struct hinic_rq_wqe *rq_wqe;
        struct hinic_dev *nic_dev;
        unsigned int free_wqebbs;
        struct hinic_rq_cqe *cqe;
        int num_wqes, pkts = 0;
        struct hinic_sge sge;
        unsigned int status;
        struct sk_buff *skb;
        u32 offload_type;
        u32 vlan_len;
        u16 num_lro;
        u16 num_wqe = 0;
        u16 ci, vid;

        nic_dev = netdev_priv(netdev);

        while (pkts < budget) {
                num_wqes = 0;

                rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb,
                                           &ci);
                if (!rq_wqe)
                        break;

                /* make sure we read rx_done before packet length */
                dma_rmb();

                cqe = rq->cqe[ci];
                status = be32_to_cpu(cqe->status);
                hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

                rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

                rx_csum(rxq, status, skb);

                prefetch(skb->data);

                pkt_len = sge.len;
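
                /* A head buffer holds at most HINIC_RX_BUF_SZ bytes; anything
                 * longer continues in subsequent WQEs and is gathered by
                 * rx_recv_jumbo_pkt() into a frag_list behind the head skb.
                 */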
                if (pkt_len <= HINIC_RX_BUF_SZ) {
                        __skb_put(skb, pkt_len);
                } else {
                        __skb_put(skb, HINIC_RX_BUF_SZ);
                        num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len -
                                                     HINIC_RX_BUF_SZ, ci);
                }

                hinic_rq_put_wqe(rq, ci,
                                 (num_wqes + 1) * HINIC_RQ_WQE_SIZE);

                offload_type = be32_to_cpu(cqe->offload_type);
                vlan_len = be32_to_cpu(cqe->len);
                if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
                    HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) {
                        vid = HINIC_GET_RX_VLAN_TAG(vlan_len);
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
                }

                if (unlikely(nic_dev->flags & HINIC_LP_TEST))
                        hinic_copy_lp_data(nic_dev, skb);

                skb_record_rx_queue(skb, qp->q_id);
                skb->protocol = eth_type_trans(skb, rxq->netdev);

                napi_gro_receive(&rxq->napi, skb);

                pkts++;
                rx_bytes += pkt_len;
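
                /* For LRO, the CQE describes one merged frame: estimate the
                 * on-wire bytes by charging one header length per extra
                 * coalesced segment, and count the WQEs it consumed so the
                 * ring can be replenished before the budget is exhausted.
                 */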
                num_lro = HINIC_GET_RX_NUM_LRO(status);
                if (num_lro) {
                        rx_bytes += ((num_lro - 1) *
                                     LRO_PKT_HDR_LEN(cqe));

                        num_wqe += (u16)(pkt_len >> rxq->rx_buff_shift) +
                                   ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
                }

                cqe->status = 0;

                if (num_wqe >= LRO_REPLENISH_THLD)
                        break;
        }

        free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
        if (free_wqebbs > HINIC_RX_BUFFER_WRITE)
                rx_alloc_pkts(rxq);

        u64_stats_update_begin(&rxq->rxq_stats.syncp);
        rxq->rxq_stats.pkts += pkts;
        rxq->rxq_stats.bytes += rx_bytes;
        u64_stats_update_end(&rxq->rxq_stats.syncp);

        return pkts;
}
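
/* NAPI poll: when the full budget is consumed, more work may be pending, so
 * stay scheduled with the interrupt masked; otherwise complete NAPI and
 * re-enable the queue's MSI-X vector (PF only; VFs do not control the msix
 * state from here).
 */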
static int rx_poll(struct napi_struct *napi, int budget)
{
        struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi);
        struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
        struct hinic_rq *rq = rxq->rq;
        int pkts;

        pkts = rxq_recv(rxq, budget);
        if (pkts >= budget)
                return budget;

        napi_complete(napi);

        if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
                hinic_hwdev_set_msix_state(nic_dev->hwdev,
                                           rq->msix_entry,
                                           HINIC_MSIX_ENABLE);

        return pkts;
}

static void rx_add_napi(struct hinic_rxq *rxq)
{
        struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);

        netif_napi_add(rxq->netdev, &rxq->napi, rx_poll, nic_dev->rx_weight);
        napi_enable(&rxq->napi);
}

static void rx_del_napi(struct hinic_rxq *rxq)
{
        napi_disable(&rxq->napi);
        netif_napi_del(&rxq->napi);
}
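
/* Hard-IRQ handler: mask the queue's MSI-X vector (on the PF), reset the
 * pending-interrupt counter, and defer all packet processing to NAPI.
 */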
static irqreturn_t rx_irq(int irq, void *data)
{
        struct hinic_rxq *rxq = (struct hinic_rxq *)data;
        struct hinic_rq *rq = rxq->rq;
        struct hinic_dev *nic_dev;

        /* Disable the interrupt until NAPI processing is complete */
        nic_dev = netdev_priv(rxq->netdev);
        if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
                hinic_hwdev_set_msix_state(nic_dev->hwdev,
                                           rq->msix_entry,
                                           HINIC_MSIX_DISABLE);

        hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);

        napi_schedule(&rxq->napi);
        return IRQ_HANDLED;
}

static int rx_request_irq(struct hinic_rxq *rxq)
{
        struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
        struct hinic_msix_config interrupt_info = {0};
        struct hinic_intr_coal_info *intr_coal = NULL;
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_rq *rq = rxq->rq;
        struct hinic_qp *qp;
        int err;

        qp = container_of(rq, struct hinic_qp, rq);

        rx_add_napi(rxq);
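
        /* Program the MSI-X entry with hardware coalescing disabled first,
         * then apply the per-queue coalescing parameters kept in
         * rx_intr_coalesce (e.g. as configured via ethtool).
         */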
        hinic_hwdev_msix_set(hwdev, rq->msix_entry,
                             RX_IRQ_NO_PENDING, RX_IRQ_NO_COALESC,
                             RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT,
                             RX_IRQ_NO_RESEND_TIMER);

        intr_coal = &nic_dev->rx_intr_coalesce[qp->q_id];
        interrupt_info.msix_index = rq->msix_entry;
        interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
        interrupt_info.pending_cnt = intr_coal->pending_limt;
        interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;

        err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
        if (err) {
                netif_err(nic_dev, drv, rxq->netdev,
                          "Failed to set RX interrupt coalescing attribute\n");
                rx_del_napi(rxq);
                return err;
        }

        err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
        if (err) {
                rx_del_napi(rxq);
                return err;
        }

        cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
        return irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
}

static void rx_free_irq(struct hinic_rxq *rxq)
{
        struct hinic_rq *rq = rxq->rq;

        irq_set_affinity_hint(rq->irq, NULL);
        free_irq(rq->irq, rxq);
        rx_del_napi(rxq);
}

/**
 * hinic_init_rxq - Initialize the Rx Queue
 * @rxq: Logical Rx Queue
 * @rq: Hardware Rx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
                   struct net_device *netdev)
{
        struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq);
        int err, pkts;

        rxq->netdev = netdev;
        rxq->rq = rq;
        rxq->buf_len = HINIC_RX_BUF_SZ;
        rxq->rx_buff_shift = ilog2(HINIC_RX_BUF_SZ);

        rxq_stats_init(rxq);

        rxq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL,
                                       "hinic_rxq%d", qp->q_id);
        if (!rxq->irq_name)
                return -ENOMEM;

        pkts = rx_alloc_pkts(rxq);
        if (!pkts) {
                err = -ENOMEM;
                goto err_rx_pkts;
        }

        err = rx_request_irq(rxq);
        if (err) {
                netdev_err(netdev, "Failed to request Rx irq\n");
                goto err_req_rx_irq;
        }

        return 0;

err_req_rx_irq:
err_rx_pkts:
        free_all_rx_skbs(rxq);
        devm_kfree(&netdev->dev, rxq->irq_name);
        return err;
}

/**
 * hinic_clean_rxq - Clean the Rx Queue
 * @rxq: Logical Rx Queue
 **/
void hinic_clean_rxq(struct hinic_rxq *rxq)
{
        struct net_device *netdev = rxq->netdev;

        rx_free_irq(rxq);

        free_all_rx_skbs(rxq);
        devm_kfree(&netdev->dev, rxq->irq_name);
}