// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/iommu.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_txrx.h"
#include "otx2_struct.h"
#include "cn10k.h"
#include <rvu_trace.h>

#define DRV_NAME	"rvu_nicpf"
#define DRV_STRING	"Marvell RVU NIC Physical Function Driver"
/* Supported devices */
static const struct pci_device_id otx2_pf_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) },
	{ 0, }  /* end of table */
};

MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
static void otx2_vf_link_event_task(struct work_struct *work);

enum {
	TYPE_PFAF,
	TYPE_PFVF,
};

static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable);
static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	bool if_up = netif_running(netdev);
	int err = 0;

	if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) {
		netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
			    netdev->mtu);
		return -EINVAL;
	}

	if (if_up)
		otx2_stop(netdev);

	netdev_info(netdev, "Changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (if_up)
		err = otx2_open(netdev);

	return err;
}
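/* Note on the VF FLR/ME masks used below (illustrative; INTR_MASK() is
 * defined in otx2_reg.h and builds an all-ones mask of 'vfs' low bits,
 * e.g. INTR_MASK(3) == 0x7): state for up to 128 VFs is spread across
 * two 64-bit registers, so VFs 0..63 live in register index 0 and
 * VFs 64..127 in register index 1, masked with INTR_MASK(vfs - 64).
 */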
static void otx2_disable_flr_me_intr(struct otx2_nic *pf)
{
	int irq, vfs = pf->total_vfs;

	/* Disable VFs ME interrupts */
	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0);
	free_irq(irq, pf);

	/* Disable VFs FLR interrupts */
	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(irq, pf);

	if (vfs <= 64)
		return;

	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1);
	free_irq(irq, pf);

	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1);
	free_irq(irq, pf);
}
static void otx2_flr_wq_destroy(struct otx2_nic *pf)
{
	if (!pf->flr_wq)
		return;
	destroy_workqueue(pf->flr_wq);
	pf->flr_wq = NULL;
	devm_kfree(pf->dev, pf->flr_wrk);
	pf->flr_wrk = NULL;
}
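/* otx2_flr_handler() runs from the FLR workqueue. The affected VF is
 * recovered with pointer arithmetic: each VF owns one flr_work slot, so
 * (flrwork - pf->flr_wrk) is the VF index. The VF_FLR mbox request is
 * stamped with FUNC = vf + 1 because, in the RVU pcifunc encoding, a
 * FUNC field of 0 denotes the PF itself and VF numbering starts at 1.
 */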
static void otx2_flr_handler(struct work_struct *work)
{
	struct flr_work *flrwork = container_of(work, struct flr_work, work);
	struct otx2_nic *pf = flrwork->pf;
	struct mbox *mbox = &pf->mbox;
	struct msg_req *req;
	int vf, reg = 0;

	vf = flrwork - pf->flr_wrk;

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_vf_flr(mbox);
	if (!req) {
		mutex_unlock(&mbox->lock);
		return;
	}
	req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK;
	req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;

	if (!otx2_sync_mbox_msg(&pf->mbox)) {
		if (vf >= 64) {
			reg = 1;
			vf = vf - 64;
		}
		/* clear transaction pending bit */
		otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
		otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
	}

	mutex_unlock(&mbox->lock);
}
static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq)
{
	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
	int reg, dev, vf, start_vf, num_reg = 1;
	u64 intr;

	if (pf->total_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg));
		if (!intr)
			continue;
		start_vf = 64 * reg;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			dev = vf + start_vf;
			queue_work(pf->flr_wq, &pf->flr_wrk[dev].work);
			/* Clear interrupt */
			otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
			/* Disable the interrupt */
			otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg),
				     BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}
static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq)
{
	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
	int vf, reg, num_reg = 1;
	u64 intr;

	if (pf->total_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg));
		if (!intr)
			continue;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			/* clear trpend bit */
			otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
			/* clear interrupt */
			otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}
static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
{
	struct otx2_hw *hw = &pf->hw;
	char *irq_name;
	int ret;

	/* Register ME interrupt handler*/
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
	ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
			  otx2_pf_me_intr_handler, 0, irq_name, pf);
	if (ret) {
		dev_err(pf->dev,
			"RVUPF: IRQ registration failed for ME0\n");
	}

	/* Register FLR interrupt handler */
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
	ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
			  otx2_pf_flr_intr_handler, 0, irq_name, pf);
	if (ret) {
		dev_err(pf->dev,
			"RVUPF: IRQ registration failed for FLR0\n");
		return ret;
	}

	if (numvfs > 64) {
		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
		snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
			 rvu_get_pf(pf->pcifunc));
		ret = request_irq(pci_irq_vector
				  (pf->pdev, RVU_PF_INT_VEC_VFME1),
				  otx2_pf_me_intr_handler, 0, irq_name, pf);
		if (ret) {
			dev_err(pf->dev,
				"RVUPF: IRQ registration failed for ME1\n");
		}
		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
		snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
			 rvu_get_pf(pf->pcifunc));
		ret = request_irq(pci_irq_vector
				  (pf->pdev, RVU_PF_INT_VEC_VFFLR1),
				  otx2_pf_flr_intr_handler, 0, irq_name, pf);
		if (ret) {
			dev_err(pf->dev,
				"RVUPF: IRQ registration failed for FLR1\n");
			return ret;
		}
	}

	/* Enable ME interrupt for all VFs*/
	otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs));
	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs));

	/* Enable FLR interrupt for all VFs*/
	otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs));
	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs));

	if (numvfs > 64) {
		otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs - 64));
		otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1),
			     INTR_MASK(numvfs - 64));

		otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs - 64));
		otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1),
			     INTR_MASK(numvfs - 64));
	}
	return 0;
}
static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
{
	int vf;

	pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq",
				     WQ_UNBOUND | WQ_HIGHPRI, 1);
	if (!pf->flr_wq)
		return -ENOMEM;

	pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs,
				   sizeof(struct flr_work), GFP_KERNEL);
	if (!pf->flr_wrk) {
		destroy_workqueue(pf->flr_wq);
		return -ENOMEM;
	}

	for (vf = 0; vf < num_vfs; vf++) {
		pf->flr_wrk[vf].pf = pf;
		INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler);
	}

	return 0;
}
static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
			    int first, int mdevs, u64 intr, int type)
{
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	int i;

	for (i = first; i < mdevs; i++) {
		/* start from 0 */
		if (!(intr & BIT_ULL(i - first)))
			continue;

		mbox = &mw->mbox;
		mdev = &mbox->dev[i];
		if (type == TYPE_PFAF)
			otx2_sync_mbox_bbuf(mbox, i);
		hdr = mdev->mbase + mbox->rx_start;
		/* The hdr->num_msgs is set to zero immediately in the interrupt
		 * handler to ensure that it holds a correct value next time
		 * when the interrupt handler is called.
		 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler
		 * and pf->mbox.up_num_msgs holds the data for use in
		 * pfaf_mbox_up_handler.
		 */
		if (hdr->num_msgs) {
			mw[i].num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			if (type == TYPE_PFAF)
				memset(mbox->hwbase + mbox->rx_start, 0,
				       ALIGN(sizeof(struct mbox_hdr),
					     sizeof(u64)));

			queue_work(mbox_wq, &mw[i].mbox_wrk);
		}

		mbox = &mw->mbox_up;
		mdev = &mbox->dev[i];
		if (type == TYPE_PFAF)
			otx2_sync_mbox_bbuf(mbox, i);
		hdr = mdev->mbase + mbox->rx_start;
		if (hdr->num_msgs) {
			mw[i].up_num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			if (type == TYPE_PFAF)
				memset(mbox->hwbase + mbox->rx_start, 0,
				       ALIGN(sizeof(struct mbox_hdr),
					     sizeof(u64)));

			queue_work(mbox_wq, &mw[i].mbox_up_wrk);
		}
	}
}
static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
				  struct otx2_mbox *pfvf_mbox, void *bbuf_base,
				  int devid)
{
	struct otx2_mbox_dev *src_mdev = mdev;
	int offset;

	/* Msgs are already copied, trigger VF's mbox irq */
	smp_wmb();

	offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
	writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);

	/* Restore VF's mbox bounce buffer region address */
	src_mdev->mbase = bbuf_base;
}
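/* The forwarding helpers below implement a zero-copy scheme: instead of
 * memcpy'ing VF requests into the PF=>AF region, the PF temporarily points
 * its AF mailbox descriptor (dst_mdev->mbase) at the VF's mailbox memory,
 * so AF responses are written straight into the VF's region. The PF's own
 * bounce buffer address must be restored once the exchange completes.
 */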
static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
				     struct otx2_mbox *src_mbox,
				     int dir, int vf, int num_msgs)
{
	struct otx2_mbox_dev *src_mdev, *dst_mdev;
	struct mbox_hdr *mbox_hdr;
	struct mbox_hdr *req_hdr;
	struct mbox *dst_mbox;
	int dst_size, err;

	if (dir == MBOX_DIR_PFAF) {
		/* Set VF's mailbox memory as PF's bounce buffer memory, so
		 * that explicit copying of VF's msgs to PF=>AF mbox region
		 * and AF=>PF responses to VF's mbox region can be avoided.
		 */
		src_mdev = &src_mbox->dev[vf];
		mbox_hdr = src_mbox->hwbase +
				src_mbox->rx_start + (vf * MBOX_SIZE);

		dst_mbox = &pf->mbox;
		dst_size = dst_mbox->mbox.tx_size -
				ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
		/* Check if msgs fit into destination area and has valid size */
		if (mbox_hdr->msg_size > dst_size || !mbox_hdr->msg_size)
			return -EINVAL;

		dst_mdev = &dst_mbox->mbox.dev[0];

		mutex_lock(&pf->mbox.lock);
		dst_mdev->mbase = src_mdev->mbase;
		dst_mdev->msg_size = mbox_hdr->msg_size;
		dst_mdev->num_msgs = num_msgs;
		err = otx2_sync_mbox_msg(dst_mbox);
		/* Error code -EIO indicates a communication failure with the
		 * AF. The remaining error codes mean that AF processed the
		 * VF messages and set the error codes in the response
		 * messages (if any), so simply forward the responses to
		 * the VF.
		 */
		if (err == -EIO) {
			dev_warn(pf->dev,
				 "AF not responding to VF%d messages\n", vf);
			/* restore PF mbase and exit */
			dst_mdev->mbase = pf->mbox.bbuf_base;
			mutex_unlock(&pf->mbox.lock);
			return err;
		}
		/* At this point, all the VF messages sent to AF are acked
		 * with proper responses and responses are copied to VF
		 * mailbox hence raise interrupt to VF.
		 */
		req_hdr = (struct mbox_hdr *)(dst_mdev->mbase +
					      dst_mbox->mbox.rx_start);
		req_hdr->num_msgs = num_msgs;

		otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox,
				      pf->mbox.bbuf_base, vf);
		mutex_unlock(&pf->mbox.lock);
	} else if (dir == MBOX_DIR_PFVF_UP) {
		src_mdev = &src_mbox->dev[0];
		mbox_hdr = src_mbox->hwbase + src_mbox->rx_start;
		req_hdr = (struct mbox_hdr *)(src_mdev->mbase +
					      src_mbox->rx_start);
		req_hdr->num_msgs = num_msgs;

		dst_mbox = &pf->mbox_pfvf[0];
		dst_size = dst_mbox->mbox_up.tx_size -
				ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
		/* Check if msgs fit into destination area */
		if (mbox_hdr->msg_size > dst_size)
			return -EINVAL;

		dst_mdev = &dst_mbox->mbox_up.dev[vf];
		dst_mdev->mbase = src_mdev->mbase;
		dst_mdev->msg_size = mbox_hdr->msg_size;
		dst_mdev->num_msgs = mbox_hdr->num_msgs;
		err = otx2_sync_mbox_up_msg(dst_mbox, vf);
		if (err) {
			dev_warn(pf->dev,
				 "VF%d is not responding to mailbox\n", vf);
			return err;
		}
	} else if (dir == MBOX_DIR_VFPF_UP) {
		req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase +
					      src_mbox->rx_start);
		req_hdr->num_msgs = num_msgs;
		otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf],
				      &pf->mbox.mbox_up,
				      pf->mbox_pfvf[vf].bbuf_base,
				      0);
	}

	return 0;
}
static void otx2_pfvf_mbox_handler(struct work_struct *work)
{
	struct mbox_msghdr *msg = NULL;
	int offset, vf_idx, id, err;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *req_hdr;
	struct otx2_mbox *mbox;
	struct mbox *vf_mbox;
	struct otx2_nic *pf;

	vf_mbox = container_of(work, struct mbox, mbox_wrk);
	pf = vf_mbox->pfvf;
	vf_idx = vf_mbox - pf->mbox_pfvf;

	mbox = &pf->mbox_pfvf[0].mbox;
	mdev = &mbox->dev[vf_idx];
	req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);

	offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < vf_mbox->num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
					     offset);

		if (msg->sig != OTX2_MBOX_REQ_SIG)
			goto inval_msg;

		/* Set VF's number in each of the msg */
		msg->pcifunc &= RVU_PFVF_FUNC_MASK;
		msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
		offset = msg->next_msgoff;
	}
	err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx,
					vf_mbox->num_msgs);
	if (err)
		goto inval_msg;
	return;

inval_msg:
	otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id);
	otx2_mbox_msg_send(mbox, vf_idx);
}
static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
{
	struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
	struct otx2_nic *pf = vf_mbox->pfvf;
	struct otx2_mbox_dev *mdev;
	int offset, id, vf_idx = 0;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;

	vf_idx = vf_mbox - pf->mbox_pfvf;
	mbox = &pf->mbox_pfvf[0].mbox_up;
	mdev = &mbox->dev[vf_idx];

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < vf_mbox->up_num_msgs; id++) {
		msg = mdev->mbase + offset;

		if (msg->id >= MBOX_MSG_MAX) {
			dev_err(pf->dev,
				"Mbox msg with unknown ID 0x%x\n", msg->id);
			goto end;
		}

		if (msg->sig != OTX2_MBOX_RSP_SIG) {
			dev_err(pf->dev,
				"Mbox msg with wrong signature %x, ID 0x%x\n",
				msg->sig, msg->id);
			goto end;
		}

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			break;
		default:
			if (msg->rc)
				dev_err(pf->dev,
					"Mbox msg response has err %d, ID 0x%x\n",
					msg->rc, msg->id);
			break;
		}

end:
		offset = mbox->rx_start + msg->next_msgoff;
		if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
			__otx2_mbox_reset(mbox, 0);
		mdev->msgs_acked++;
	}
}
static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
{
	struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
	int vfs = pf->total_vfs;
	struct mbox *mbox;
	u64 intr;

	mbox = pf->mbox_pfvf;
	/* Handle VF interrupts */
	if (vfs > 64) {
		intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
		otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
				TYPE_PFVF);
		vfs -= 64;
	}

	intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);

	otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);

	trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);

	return IRQ_HANDLED;
}
static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
{
	void __iomem *hwbase;
	struct mbox *mbox;
	int err, vf;
	u64 base;

	if (!numvfs)
		return -EINVAL;

	pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs,
				     sizeof(struct mbox), GFP_KERNEL);
	if (!pf->mbox_pfvf)
		return -ENOMEM;

	pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
					   WQ_UNBOUND | WQ_HIGHPRI |
					   WQ_MEM_RECLAIM, 1);
	if (!pf->mbox_pfvf_wq)
		return -ENOMEM;

	/* On CN10K platform, PF <-> VF mailbox region follows after
	 * PF <-> AF mailbox region.
	 */
	if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
		base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
		       MBOX_SIZE;
	else
		base = readq((void __iomem *)((u64)pf->reg_base +
					      RVU_PF_VF_BAR4_ADDR));

	hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
	if (!hwbase) {
		err = -ENOMEM;
		goto free_wq;
	}

	mbox = &pf->mbox_pfvf[0];
	err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
			     MBOX_DIR_PFVF, numvfs);
	if (err)
		goto free_iomem;

	err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
			     MBOX_DIR_PFVF_UP, numvfs);
	if (err)
		goto free_iomem;

	for (vf = 0; vf < numvfs; vf++) {
		mbox->pfvf = pf;
		INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler);
		INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler);
		mbox++;
	}

	return 0;

free_iomem:
	if (hwbase)
		iounmap(hwbase);
free_wq:
	destroy_workqueue(pf->mbox_pfvf_wq);
	return err;
}
static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
{
	struct mbox *mbox = &pf->mbox_pfvf[0];

	if (!mbox)
		return;

	if (pf->mbox_pfvf_wq) {
		destroy_workqueue(pf->mbox_pfvf_wq);
		pf->mbox_pfvf_wq = NULL;
	}

	if (mbox->mbox.hwbase)
		iounmap(mbox->mbox.hwbase);

	otx2_mbox_destroy(&mbox->mbox);
}
static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
{
	/* Clear PF <=> VF mailbox IRQ */
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);

	/* Enable PF <=> VF mailbox IRQ */
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs));
	if (numvfs > 64) {
		numvfs -= 64;
		otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
			     INTR_MASK(numvfs));
	}
}
static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
{
	int vector;

	/* Disable PF <=> VF mailbox IRQ */
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);

	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
	vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, pf);

	if (numvfs > 64) {
		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
		vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		free_irq(vector, pf);
	}
}
static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
{
	struct otx2_hw *hw = &pf->hw;
	char *irq_name;
	int err;

	/* Register MBOX0 interrupt handler */
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
	if (pf->pcifunc)
		snprintf(irq_name, NAME_SIZE,
			 "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
	else
		snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
	err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
			  otx2_pfvf_mbox_intr_handler, 0, irq_name, pf);
	if (err) {
		dev_err(pf->dev,
			"RVUPF: IRQ registration failed for PFVF mbox0 irq\n");
		return err;
	}

	if (numvfs > 64) {
		/* Register MBOX1 interrupt handler */
		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
		if (pf->pcifunc)
			snprintf(irq_name, NAME_SIZE,
				 "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
		else
			snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
		err = request_irq(pci_irq_vector(pf->pdev,
						 RVU_PF_INT_VEC_VFPF_MBOX1),
				  otx2_pfvf_mbox_intr_handler,
				  0, irq_name, pf);
		if (err) {
			dev_err(pf->dev,
				"RVUPF: IRQ registration failed for PFVF mbox1 irq\n");
			return err;
		}
	}

	otx2_enable_pfvf_mbox_intr(pf, numvfs);

	return 0;
}
static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
				       struct mbox_msghdr *msg)
{
	int devid;

	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(pf->dev,
			"Mbox msg with unknown ID 0x%x\n", msg->id);
		return;
	}

	if (msg->sig != OTX2_MBOX_RSP_SIG) {
		dev_err(pf->dev,
			"Mbox msg with wrong signature %x, ID 0x%x\n",
			 msg->sig, msg->id);
		return;
	}

	/* message response heading VF */
	devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
	if (devid) {
		struct otx2_vf_config *config = &pf->vf_configs[devid - 1];
		struct delayed_work *dwork;

		switch (msg->id) {
		case MBOX_MSG_NIX_LF_START_RX:
			config->intf_down = false;
			dwork = &config->link_event_work;
			schedule_delayed_work(dwork, msecs_to_jiffies(100));
			break;
		case MBOX_MSG_NIX_LF_STOP_RX:
			config->intf_down = true;
			break;
		}

		return;
	}

	switch (msg->id) {
	case MBOX_MSG_READY:
		pf->pcifunc = msg->pcifunc;
		break;
	case MBOX_MSG_MSIX_OFFSET:
		mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg);
		break;
	case MBOX_MSG_NPA_LF_ALLOC:
		mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_LF_ALLOC:
		mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_TXSCH_ALLOC:
		mbox_handler_nix_txsch_alloc(pf,
					     (struct nix_txsch_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_BP_ENABLE:
		mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
		break;
	case MBOX_MSG_CGX_STATS:
		mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
		break;
	case MBOX_MSG_CGX_FEC_STATS:
		mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg);
		break;
	default:
		if (msg->rc)
			dev_err(pf->dev,
				"Mbox msg response has err %d, ID 0x%x\n",
				msg->rc, msg->id);
		break;
	}
}
static void otx2_pfaf_mbox_handler(struct work_struct *work)
{
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	struct mbox *af_mbox;
	struct otx2_nic *pf;
	int offset, id;

	af_mbox = container_of(work, struct mbox, mbox_wrk);
	mbox = &af_mbox->mbox;
	mdev = &mbox->dev[0];
	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
	pf = af_mbox->pfvf;

	for (id = 0; id < af_mbox->num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
		otx2_process_pfaf_mbox_msg(pf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
		if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
			__otx2_mbox_reset(mbox, 0);
		mdev->msgs_acked++;
	}
}
static void otx2_handle_link_event(struct otx2_nic *pf)
{
	struct cgx_link_user_info *linfo = &pf->linfo;
	struct net_device *netdev = pf->netdev;

	pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
		linfo->link_up ? "UP" : "DOWN", linfo->speed,
		linfo->full_duplex ? "Full" : "Half");
	if (linfo->link_up) {
		netif_carrier_on(netdev);
		netif_tx_start_all_queues(netdev);
	} else {
		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
	}
}
int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
					struct cgx_link_info_msg *msg,
					struct msg_rsp *rsp)
{
	int i;

	/* Copy the link info sent by AF */
	pf->linfo = msg->link_info;

	/* notify VFs about link event */
	for (i = 0; i < pci_num_vf(pf->pdev); i++) {
		struct otx2_vf_config *config = &pf->vf_configs[i];
		struct delayed_work *dwork = &config->link_event_work;

		if (config->intf_down)
			continue;

		schedule_delayed_work(dwork, msecs_to_jiffies(100));
	}

	/* interface has not been fully configured yet */
	if (pf->flags & OTX2_FLAG_INTF_DOWN)
		return 0;

	otx2_handle_link_event(pf);
	return 0;
}
static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
				    struct mbox_msghdr *req)
{
	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG) {
		otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}

	switch (req->id) {
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
	case _id: {							\
		struct _rsp_type *rsp;					\
		int err;						\
									\
		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
			&pf->mbox.mbox_up, 0,				\
			sizeof(struct _rsp_type));			\
		if (!rsp)						\
			return -ENOMEM;					\
									\
		rsp->hdr.id = _id;					\
		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;			\
		rsp->hdr.pcifunc = 0;					\
		rsp->hdr.rc = 0;					\
									\
		err = otx2_mbox_up_handler_ ## _fn_name(		\
			pf, (struct _req_type *)req, rsp);		\
		if (err)						\
			rsp->hdr.rc = err;				\
									\
		return err;						\
	}
MBOX_UP_CGX_MESSAGES
#undef M
		break;
	default:
		otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}
	return 0;
}
static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
{
	struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk);
	struct otx2_mbox *mbox = &af_mbox->mbox_up;
	struct otx2_mbox_dev *mdev = &mbox->dev[0];
	struct otx2_nic *pf = af_mbox->pfvf;
	int offset, id, devid = 0;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < af_mbox->up_num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);

		devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
		/* Skip processing VF's messages */
		if (!devid)
			otx2_process_mbox_msg_up(pf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
	}
	if (devid) {
		otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
					  MBOX_DIR_PFVF_UP, devid - 1,
					  af_mbox->up_num_msgs);
		return;
	}

	otx2_mbox_msg_send(mbox, 0);
}
static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
{
	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
	struct mbox *mbox;

	/* Clear the IRQ */
	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));

	mbox = &pf->mbox;

	trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0));

	otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);

	return IRQ_HANDLED;
}
static void otx2_disable_mbox_intr(struct otx2_nic *pf)
{
	int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);

	/* Disable AF => PF mailbox IRQ */
	otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
	free_irq(vector, pf);
}
static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
{
	struct otx2_hw *hw = &pf->hw;
	struct msg_req *req;
	char *irq_name;
	int err;

	/* Register mailbox interrupt handler */
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
	err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
			  otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
	if (err) {
		dev_err(pf->dev,
			"RVUPF: IRQ registration failed for PFAF mbox irq\n");
		return err;
	}

	/* Enable mailbox interrupt for msgs coming from AF.
	 * First clear to avoid spurious interrupts, if any.
	 */
	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
	otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));

	if (!probe_af)
		return 0;

	/* Check mailbox communication with AF */
	req = otx2_mbox_alloc_msg_ready(&pf->mbox);
	if (!req) {
		otx2_disable_mbox_intr(pf);
		return -ENOMEM;
	}
	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		dev_warn(pf->dev,
			 "AF not responding to mailbox, deferring probe\n");
		otx2_disable_mbox_intr(pf);
		return -EPROBE_DEFER;
	}

	return 0;
}
static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
{
	struct mbox *mbox = &pf->mbox;

	if (pf->mbox_wq) {
		destroy_workqueue(pf->mbox_wq);
		pf->mbox_wq = NULL;
	}

	if (mbox->mbox.hwbase)
		iounmap((void __iomem *)mbox->mbox.hwbase);

	otx2_mbox_destroy(&mbox->mbox);
	otx2_mbox_destroy(&mbox->mbox_up);
}
static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
{
	struct mbox *mbox = &pf->mbox;
	void __iomem *hwbase;
	int err;

	mbox->pfvf = pf;
	pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox",
				      WQ_UNBOUND | WQ_HIGHPRI |
				      WQ_MEM_RECLAIM, 1);
	if (!pf->mbox_wq)
		return -ENOMEM;

	/* Mailbox is a reserved memory (in RAM) region shared between
	 * admin function (i.e AF) and this PF, shouldn't be mapped as
	 * device memory to allow unaligned accesses.
	 */
	hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
			    MBOX_SIZE);
	if (!hwbase) {
		dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
		err = -ENOMEM;
		goto exit;
	}

	err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
			     MBOX_DIR_PFAF, 1);
	if (err)
		goto exit;

	err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
			     MBOX_DIR_PFAF_UP, 1);
	if (err)
		goto exit;

	err = otx2_mbox_bbuf_init(mbox, pf->pdev);
	if (err)
		goto exit;

	INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
	INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
	mutex_init(&mbox->lock);

	return 0;
exit:
	otx2_pfaf_mbox_destroy(pf);
	return err;
}
static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
{
	struct msg_req *msg;
	int err;

	mutex_lock(&pf->mbox.lock);
	if (enable)
		msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
	else
		msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);

	if (!msg) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);
	return err;
}
static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
{
	struct msg_req *msg;
	int err;

	if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
				    pf->flow_cfg->dmacflt_max_flows))
		netdev_warn(pf->netdev,
			    "CGX/RPM internal loopback might not work as DMAC filters are active\n");

	mutex_lock(&pf->mbox.lock);
	if (enable)
		msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
	else
		msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);

	if (!msg) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);
	return err;
}
int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues)
{
	int err;

	err = netif_set_real_num_tx_queues(netdev, tx_queues);
	if (err) {
		netdev_err(netdev,
			   "Failed to set no of Tx queues: %d\n", tx_queues);
		return err;
	}

	err = netif_set_real_num_rx_queues(netdev, rx_queues);
	if (err)
		netdev_err(netdev,
			   "Failed to set no of Rx queues: %d\n", rx_queues);
	return err;
}
EXPORT_SYMBOL(otx2_set_real_num_queues);
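/* A sketch of the atomic op used in the error handler below (the exact
 * register semantics are defined by the NIX hardware spec): NIX_LF_*_OP_INT
 * are "op" registers, i.e. an atomic add whose operand carries the queue
 * index in the upper bits selects the queue and returns its interrupt
 * status. For queue 2, (2ULL << 44) is added; the low bits of the returned
 * value hold that queue's error bits, and bit 42 flags a failed read.
 */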
static irqreturn_t otx2_q_intr_handler(int irq, void *data)
{
	struct otx2_nic *pf = data;
	u64 val, *ptr;
	u64 qidx = 0;

	/* CQ */
	for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
		ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
		val = otx2_atomic64_add((qidx << 44), ptr);

		otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
			     (val & NIX_CQERRINT_BITS));
		if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42))))
			continue;

		if (val & BIT_ULL(42)) {
			netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
		} else {
			if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
				netdev_err(pf->netdev, "CQ%lld: Doorbell error",
					   qidx);
			if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
				netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
					   qidx);
		}

		schedule_work(&pf->reset_task);
	}

	/* SQ */
	for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
		ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
		val = otx2_atomic64_add((qidx << 44), ptr);
		otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
			     (val & NIX_SQINT_BITS));

		if (!(val & (NIX_SQINT_BITS | BIT_ULL(42))))
			continue;

		if (val & BIT_ULL(42)) {
			netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
		} else {
			if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) {
				netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx",
					   qidx,
					   otx2_read64(pf,
						       NIX_LF_SQ_OP_ERR_DBG));
				otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG,
					     BIT_ULL(44));
			}
			if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
				netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DGB:0x%llx\n",
					   qidx,
					   otx2_read64(pf, NIX_LF_MNQ_ERR_DBG));
				otx2_write64(pf, NIX_LF_MNQ_ERR_DBG,
					     BIT_ULL(44));
			}
			if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) {
				netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx",
					   qidx,
					   otx2_read64(pf,
						       NIX_LF_SEND_ERR_DBG));
				otx2_write64(pf, NIX_LF_SEND_ERR_DBG,
					     BIT_ULL(44));
			}
			if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
				netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
					   qidx);
		}

		schedule_work(&pf->reset_task);
	}

	return IRQ_HANDLED;
}
static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
{
	struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
	struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
	int qidx = cq_poll->cint_idx;

	/* Disable interrupts.
	 *
	 * Completion interrupts behave in a level-triggered interrupt
	 * fashion, and hence have to be cleared only after it is serviced.
	 */
	otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));

	/* Schedule NAPI */
	napi_schedule_irqoff(&cq_poll->napi);

	return IRQ_HANDLED;
}
static void otx2_disable_napi(struct otx2_nic *pf)
{
	struct otx2_qset *qset = &pf->qset;
	struct otx2_cq_poll *cq_poll;
	int qidx;

	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		cq_poll = &qset->napi[qidx];
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}
}
static void otx2_free_cq_res(struct otx2_nic *pf)
{
	struct otx2_qset *qset = &pf->qset;
	struct otx2_cq_queue *cq;
	int qidx;

	/* Disable CQs */
	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false);
	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
		cq = &qset->cq[qidx];
		qmem_free(pf->dev, cq->cqe);
	}
}
static void otx2_free_sq_res(struct otx2_nic *pf)
{
	struct otx2_qset *qset = &pf->qset;
	struct otx2_snd_queue *sq;
	int qidx;

	/* Disable SQs */
	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
	/* Free SQB pointers */
	otx2_sq_free_sqbs(pf);
	for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
		sq = &qset->sq[qidx];
		qmem_free(pf->dev, sq->sqe);
		qmem_free(pf->dev, sq->tso_hdrs);
		kfree(sq->sg);
		kfree(sq->sqb_ptrs);
	}
}
static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
{
	int frame_size;
	int total_size;
	int rbuf_size;

	/* The data transferred by NIX to memory consists of actual packet
	 * plus additional data which has timestamp and/or EDSA/HIGIG2
	 * headers if interface is configured in corresponding modes.
	 * NIX transfers entire data using 6 segments/buffers and writes
	 * a CQE_RX descriptor with those segment addresses. First segment
	 * has additional data prepended to packet. Also software omits a
	 * headroom of 128 bytes in each segment. Hence the total size of
	 * memory needed to receive a packet with 'mtu' is:
	 * frame size = mtu + additional data;
	 * memory = frame_size + headroom * 6;
	 * each receive buffer size = memory / 6;
	 */
	frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
	total_size = frame_size + OTX2_HEAD_ROOM * 6;
	rbuf_size = total_size / 6;

	return ALIGN(rbuf_size, 2048);
}
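/* Worked example (assuming OTX2_ETH_HLEN = 22, OTX2_HW_TIMESTAMP_LEN = 8
 * and OTX2_HEAD_ROOM = 128, as defined in otx2_common.h): for mtu = 1500,
 * frame_size = 1500 + 22 + 8 = 1530, total_size = 1530 + 128 * 6 = 2298,
 * rbuf_size = 2298 / 6 = 383, which ALIGN() rounds up to a 2048B buffer.
 */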
static int otx2_init_hw_resources(struct otx2_nic *pf)
{
	struct nix_lf_free_req *free_req;
	struct mbox *mbox = &pf->mbox;
	struct otx2_hw *hw = &pf->hw;
	struct msg_req *req;
	int err = 0, lvl;

	/* Set required NPA LF's pool counts
	 * Auras and Pools are used in a 1:1 mapping,
	 * so, aura count = pool count.
	 */
	hw->rqpool_cnt = hw->rx_queues;
	hw->sqpool_cnt = hw->tot_tx_queues;
	hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;

	/* Maximum hardware supported transmit length */
	pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;

	pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);

	mutex_lock(&mbox->lock);
	/* NPA init */
	err = otx2_config_npa(pf);
	if (err)
		goto exit;

	/* NIX init */
	err = otx2_config_nix(pf);
	if (err)
		goto err_free_npa_lf;

	/* Enable backpressure */
	otx2_nix_config_bp(pf, true);

	/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
	err = otx2_rq_aura_pool_init(pf);
	if (err) {
		mutex_unlock(&mbox->lock);
		goto err_free_nix_lf;
	}
	/* Init Auras and pools used by NIX SQ, for queueing SQEs */
	err = otx2_sq_aura_pool_init(pf);
	if (err) {
		mutex_unlock(&mbox->lock);
		goto err_free_rq_ptrs;
	}

	err = otx2_txsch_alloc(pf);
	if (err) {
		mutex_unlock(&mbox->lock);
		goto err_free_sq_ptrs;
	}

	err = otx2_config_nix_queues(pf);
	if (err) {
		mutex_unlock(&mbox->lock);
		goto err_free_txsch;
	}
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		err = otx2_txschq_config(pf, lvl);
		if (err) {
			mutex_unlock(&mbox->lock);
			goto err_free_nix_queues;
		}
	}
	mutex_unlock(&mbox->lock);
	return err;

err_free_nix_queues:
	otx2_free_sq_res(pf);
	otx2_free_cq_res(pf);
	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
err_free_txsch:
	if (otx2_txschq_stop(pf))
		dev_err(pf->dev, "%s failed to stop TX schedulers\n", __func__);
err_free_sq_ptrs:
	otx2_sq_free_sqbs(pf);
err_free_rq_ptrs:
	otx2_free_aura_ptr(pf, AURA_NIX_RQ);
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
	otx2_aura_pool_free(pf);
err_free_nix_lf:
	mutex_lock(&mbox->lock);
	free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
	if (free_req) {
		free_req->flags = NIX_LF_DISABLE_FLOWS;
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
	}
err_free_npa_lf:
	/* Reset NPA LF */
	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
	if (req) {
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
	}
exit:
	mutex_unlock(&mbox->lock);
	return err;
}
static void otx2_free_hw_resources(struct otx2_nic *pf)
{
	struct otx2_qset *qset = &pf->qset;
	struct nix_lf_free_req *free_req;
	struct mbox *mbox = &pf->mbox;
	struct otx2_cq_queue *cq;
	struct msg_req *req;
	int qidx, err;

	/* Ensure all SQE are processed */
	otx2_sqb_flush(pf);

	/* Stop transmission */
	err = otx2_txschq_stop(pf);
	if (err)
		dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");

	mutex_lock(&mbox->lock);
	/* Disable backpressure */
	if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
		otx2_nix_config_bp(pf, false);
	mutex_unlock(&mbox->lock);

	/* Disable RQs */
	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);

	/*Dequeue all CQEs */
	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
		cq = &qset->cq[qidx];
		if (cq->cq_type == CQ_RX)
			otx2_cleanup_rx_cqes(pf, cq);
		else
			otx2_cleanup_tx_cqes(pf, cq);
	}

	otx2_free_sq_res(pf);

	/* Free RQ buffer pointers*/
	otx2_free_aura_ptr(pf, AURA_NIX_RQ);

	otx2_free_cq_res(pf);

	/* Free all ingress bandwidth profiles allocated */
	cn10k_free_all_ipolicers(pf);

	mutex_lock(&mbox->lock);
	/* Reset NIX LF */
	free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
	if (free_req) {
		free_req->flags = NIX_LF_DISABLE_FLOWS;
		if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN))
			free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG;
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
	}
	mutex_unlock(&mbox->lock);

	/* Disable NPA Pool and Aura hw context */
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
	otx2_aura_pool_free(pf);

	mutex_lock(&mbox->lock);
	/* Reset NPA LF */
	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
	if (req) {
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
	}
	mutex_unlock(&mbox->lock);
}
static void otx2_do_set_rx_mode(struct otx2_nic *pf)
{
	struct net_device *netdev = pf->netdev;
	struct nix_rx_mode *req;
	bool promisc = false;

	if (!(netdev->flags & IFF_UP))
		return;

	if ((netdev->flags & IFF_PROMISC) ||
	    (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
		promisc = true;
	}

	/* Write unicast address to mcam entries or del from mcam */
	if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
		__dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return;
	}

	req->mode = NIX_RX_MODE_UCAST;

	if (promisc)
		req->mode |= NIX_RX_MODE_PROMISC;
	if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
		req->mode |= NIX_RX_MODE_ALLMULTI;

	req->mode |= NIX_RX_MODE_USE_MCE;

	otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);
}
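/* Example of the fallback above (OTX2_MAX_UNICAST_FLOWS is the per-PF
 * budget of MCAM unicast filter entries, 8 at the time of writing): once
 * the number of secondary unicast addresses exceeds that budget, exact
 * match filtering is no longer possible, so the device is switched to
 * promiscuous mode instead of silently dropping the extra addresses.
 */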
int otx2_open(struct net_device *netdev)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct otx2_cq_poll *cq_poll = NULL;
	struct otx2_qset *qset = &pf->qset;
	int err = 0, qidx, vec;
	char *irq_name;

	netif_carrier_off(netdev);

	pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tot_tx_queues;
	/* RQ and SQs are mapped to different CQs,
	 * so find out max CQ IRQs (i.e CINTs) needed.
	 */
	pf->hw.cint_cnt = max(pf->hw.rx_queues, pf->hw.tx_queues);
	qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
	if (!qset->napi)
		return -ENOMEM;

	/* CQ size of RQ */
	qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256);
	/* CQ size of SQ */
	qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);

	err = -ENOMEM;
	qset->cq = kcalloc(pf->qset.cq_cnt,
			   sizeof(struct otx2_cq_queue), GFP_KERNEL);
	if (!qset->cq)
		goto err_free_mem;

	qset->sq = kcalloc(pf->hw.tot_tx_queues,
			   sizeof(struct otx2_snd_queue), GFP_KERNEL);
	if (!qset->sq)
		goto err_free_mem;

	qset->rq = kcalloc(pf->hw.rx_queues,
			   sizeof(struct otx2_rcv_queue), GFP_KERNEL);
	if (!qset->rq)
		goto err_free_mem;

	err = otx2_init_hw_resources(pf);
	if (err)
		goto err_free_mem;

	/* Register NAPI handler */
	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		cq_poll = &qset->napi[qidx];
		cq_poll->cint_idx = qidx;
		/* RQ0 & SQ0 are mapped to CINT0 and so on..
		 * 'cq_ids[0]' points to RQ's CQ,
		 * 'cq_ids[1]' points to SQ's CQ and
		 * 'cq_ids[2]' points to XDP's CQ.
		 */
		cq_poll->cq_ids[CQ_RX] =
			(qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
		cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
				      qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
		if (pf->xdp_prog)
			cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ?
						  (qidx + pf->hw.rx_queues +
						   pf->hw.tx_queues) :
						  CINT_INVALID_CQ;
		else
			cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;

		cq_poll->dev = (void *)pf;
		netif_napi_add(netdev, &cq_poll->napi,
			       otx2_napi_handler, NAPI_POLL_WEIGHT);
		napi_enable(&cq_poll->napi);
	}

	/* Set maximum frame size allowed in HW */
	err = otx2_hw_set_mtu(pf, netdev->mtu);
	if (err)
		goto err_disable_napi;

	/* Setup segmentation algorithms, if failed, clear offload capability */
	otx2_setup_segmentation(pf);

	/* Initialize RSS */
	err = otx2_rss_init(pf);
	if (err)
		goto err_disable_napi;

	/* Register Queue IRQ handlers */
	vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START;
	irq_name = &pf->hw.irq_name[vec * NAME_SIZE];

	snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name);

	err = request_irq(pci_irq_vector(pf->pdev, vec),
			  otx2_q_intr_handler, 0, irq_name, pf);
	if (err) {
		dev_err(pf->dev,
			"RVUPF%d: IRQ registration failed for QERR\n",
			rvu_get_pf(pf->pcifunc));
		goto err_disable_napi;
	}

	/* Enable QINT IRQ */
	otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0));

	/* Register CQ IRQ handlers */
	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		irq_name = &pf->hw.irq_name[vec * NAME_SIZE];

		snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name,
			 qidx);

		err = request_irq(pci_irq_vector(pf->pdev, vec),
				  otx2_cq_intr_handler, 0, irq_name,
				  &qset->napi[qidx]);
		if (err) {
			dev_err(pf->dev,
				"RVUPF%d: IRQ registration failed for CQ%d\n",
				rvu_get_pf(pf->pcifunc), qidx);
			goto err_free_cints;
		}
		vec++;

		otx2_config_irq_coalescing(pf, qidx);

		/* Enable CQ IRQ */
		otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
		otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
	}

	otx2_set_cints_affinity(pf);

	if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
		otx2_enable_rxvlan(pf, true);

	/* When reinitializing enable time stamping if it is enabled before */
	if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
		pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
		otx2_config_hw_tx_tstamp(pf, true);
	}
	if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) {
		pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
		otx2_config_hw_rx_tstamp(pf, true);
	}

	pf->flags &= ~OTX2_FLAG_INTF_DOWN;
	/* 'intf_down' may be checked on any cpu */
	smp_wmb();

	/* we have already received link status notification */
	if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
		otx2_handle_link_event(pf);

	/* Restore pause frame settings */
	otx2_config_pause_frm(pf);

	/* Install DMAC Filters */
	if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
		otx2_dmacflt_reinstall_flows(pf);

	err = otx2_rxtx_enable(pf, true);
	if (err)
		goto err_tx_stop_queues;

	otx2_do_set_rx_mode(pf);

	return 0;

err_tx_stop_queues:
	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);
	pf->flags |= OTX2_FLAG_INTF_DOWN;
err_free_cints:
	otx2_free_cints(pf, qidx);
	vec = pci_irq_vector(pf->pdev,
			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
	synchronize_irq(vec);
	free_irq(vec, pf);
err_disable_napi:
	otx2_disable_napi(pf);
	otx2_free_hw_resources(pf);
err_free_mem:
	kfree(qset->sq);
	kfree(qset->cq);
	kfree(qset->rq);
	kfree(qset->napi);
	return err;
}
EXPORT_SYMBOL(otx2_open);
int otx2_stop(struct net_device *netdev)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct otx2_cq_poll *cq_poll = NULL;
	struct otx2_qset *qset = &pf->qset;
	struct otx2_rss_info *rss;
	int qidx, vec, wrk;

	/* If the DOWN flag is set resources are already freed */
	if (pf->flags & OTX2_FLAG_INTF_DOWN)
		return 0;

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	pf->flags |= OTX2_FLAG_INTF_DOWN;
	/* 'intf_down' may be checked on any cpu */
	smp_wmb();

	/* First stop packet Rx/Tx */
	otx2_rxtx_enable(pf, false);

	/* Clear RSS enable flag */
	rss = &pf->hw.rss_info;
	rss->enable = false;

	/* Cleanup Queue IRQ */
	vec = pci_irq_vector(pf->pdev,
			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
	synchronize_irq(vec);
	free_irq(vec, pf);

	/* Cleanup CQ NAPI and IRQ */
	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		/* Disable interrupt */
		otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));

		synchronize_irq(pci_irq_vector(pf->pdev, vec));

		cq_poll = &qset->napi[qidx];
		napi_synchronize(&cq_poll->napi);
		vec++;
	}

	netif_tx_disable(netdev);

	otx2_free_hw_resources(pf);
	otx2_free_cints(pf, pf->hw.cint_cnt);
	otx2_disable_napi(pf);

	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));

	for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
		cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
	devm_kfree(pf->dev, pf->refill_wrk);

	kfree(qset->sq);
	kfree(qset->cq);
	kfree(qset->rq);
	kfree(qset->napi);
	/* Do not clear RQ/SQ ringsize settings */
	memset((void *)qset + offsetof(struct otx2_qset, sqe_cnt), 0,
	       sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt));
	return 0;
}
EXPORT_SYMBOL(otx2_stop);
static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	int qidx = skb_get_queue_mapping(skb);
	struct otx2_snd_queue *sq;
	struct netdev_queue *txq;

	/* Check for minimum and maximum packet length */
	if (skb->len <= ETH_HLEN ||
	    (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	sq = &pf->qset.sq[qidx];
	txq = netdev_get_tx_queue(netdev, qidx);

	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
		netif_tx_stop_queue(txq);

		/* Check again, in case SQBs got freed up */
		smp_mb();
		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
							> sq->sqe_thresh)
			netif_tx_wake_queue(txq);

		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
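/* SQB accounting sketch for the wake check above: *sq->aura_fc_addr is the
 * hardware-maintained aura count for this SQ's send-buffer pool, which the
 * expression combines with sqe_per_sqb to estimate how many SQE slots are
 * available; the queue is woken only when that estimate exceeds
 * sq->sqe_thresh (the exact aura count semantics are defined by the NPA
 * block, so treat this as an illustration, not a spec).
 */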
static netdev_features_t otx2_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_STAG_RX;
	else
		features &= ~NETIF_F_HW_VLAN_STAG_RX;

	return features;
}
static void otx2_set_rx_mode(struct net_device *netdev)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	queue_work(pf->otx2_wq, &pf->rx_mode_work);
}
static void otx2_rx_mode_wrk_handler(struct work_struct *work)
{
	struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);

	otx2_do_set_rx_mode(pf);
}
static int otx2_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	netdev_features_t changed = features ^ netdev->features;
	bool ntuple = !!(features & NETIF_F_NTUPLE);
	struct otx2_nic *pf = netdev_priv(netdev);
	bool tc = !!(features & NETIF_F_HW_TC);

	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
		return otx2_cgx_config_loopback(pf,
						features & NETIF_F_LOOPBACK);

	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev))
		return otx2_enable_rxvlan(pf,
					  features & NETIF_F_HW_VLAN_CTAG_RX);

	if ((changed & NETIF_F_NTUPLE) && !ntuple)
		otx2_destroy_ntuple_flows(pf);

	if ((changed & NETIF_F_NTUPLE) && ntuple) {
		if (!pf->flow_cfg->max_flows) {
			netdev_err(netdev,
				   "Can't enable NTUPLE, MCAM entries not allocated\n");
			return -EINVAL;
		}
	}

	if ((changed & NETIF_F_HW_TC) && tc) {
		if (!pf->flow_cfg->max_flows) {
			netdev_err(netdev,
				   "Can't enable TC, MCAM entries not allocated\n");
			return -EINVAL;
		}
	}

	if ((changed & NETIF_F_HW_TC) && !tc &&
	    pf->flow_cfg && pf->flow_cfg->nr_flows) {
		netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
		return -EBUSY;
	}

	if ((changed & NETIF_F_NTUPLE) && ntuple &&
	    (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
		netdev_err(netdev,
			   "Can't enable NTUPLE when TC is active, disable TC and retry\n");
		return -EINVAL;
	}

	if ((changed & NETIF_F_HW_TC) && tc &&
	    (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
		netdev_err(netdev,
			   "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
		return -EINVAL;
	}

	return 0;
}
static void otx2_reset_task(struct work_struct *work)
{
	struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task);

	if (!netif_running(pf->netdev))
		return;

	rtnl_lock();
	otx2_stop(pf->netdev);
	pf->reset_count++;
	otx2_open(pf->netdev);
	netif_trans_update(pf->netdev);
	rtnl_unlock();
}
static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable)
{
	struct msg_req *req;
	int err;

	if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	if (enable)
		req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox);
	else
		req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	mutex_unlock(&pfvf->mbox.lock);
	if (enable)
		pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED;
	else
		pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
	return 0;
}
static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
{
	struct msg_req *req;
	int err;

	if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	if (enable)
		req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox);
	else
		req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	mutex_unlock(&pfvf->mbox.lock);
	if (enable)
		pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED;
	else
		pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
	return 0;
}
int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct hwtstamp_config config;

	if (!pfvf->ptp)
		return -ENODEV;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		otx2_config_hw_tx_tstamp(pfvf, false);
		break;
	case HWTSTAMP_TX_ON:
		otx2_config_hw_tx_tstamp(pfvf, true);
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		otx2_config_hw_rx_tstamp(pfvf, false);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		otx2_config_hw_rx_tstamp(pfvf, true);
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	memcpy(&pfvf->tstamp, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(otx2_config_hwtstamp);
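/* RX filter granularity note for the function above: once PTP RX
 * timestamping is on, the hardware timestamps every received packet, so
 * all of the specific PTP filter requests are accepted but reported back
 * to userspace as HWTSTAMP_FILTER_ALL.
 */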
int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct hwtstamp_config *cfg = &pfvf->tstamp;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return otx2_config_hwtstamp(netdev, req);
	case SIOCGHWTSTAMP:
		return copy_to_user(req->ifr_data, cfg,
				    sizeof(*cfg)) ? -EFAULT : 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(otx2_ioctl);
static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
{
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->default_rule = 1;
	req->append = 1;
	req->vf = vf + 1;
	req->op = NIX_RX_ACTION_DEFAULT;

	err = otx2_sync_mbox_msg(&pf->mbox);
out:
	mutex_unlock(&pf->mbox.lock);
	return err;
}
static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct pci_dev *pdev = pf->pdev;
	struct otx2_vf_config *config;
	int ret;

	if (!netif_running(netdev))
		return -EAGAIN;

	if (vf >= pf->total_vfs)
		return -EINVAL;

	if (!is_valid_ether_addr(mac))
		return -EINVAL;

	config = &pf->vf_configs[vf];
	ether_addr_copy(config->mac, mac);

	ret = otx2_do_set_vf_mac(pf, vf, mac);
	if (ret == 0)
		dev_info(&pdev->dev,
			 "Load/Reload VF driver\n");

	return ret;
}
static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
			       __be16 proto)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct nix_vtag_config_rsp *vtag_rsp;
	struct npc_delete_flow_req *del_req;
	struct nix_vtag_config *vtag_req;
	struct npc_install_flow_req *req;
	struct otx2_vf_config *config;
	int err = 0;
	u32 idx;

	config = &pf->vf_configs[vf];

	if (!vlan && !config->vlan)
		return 0;

	mutex_lock(&pf->mbox.lock);

	/* free old tx vtag entry */
	if (config->vlan) {
		vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
		if (!vtag_req) {
			err = -ENOMEM;
			goto out;
		}
		vtag_req->cfg_type = 0;
		vtag_req->tx.free_vtag0 = 1;
		vtag_req->tx.vtag0_idx = config->tx_vtag_idx;

		err = otx2_sync_mbox_msg(&pf->mbox);
		if (err)
			goto out;
	}

	if (!vlan && config->vlan) {
		/* rx */
		del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
		if (!del_req) {
			err = -ENOMEM;
			goto out;
		}
		idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
		del_req->entry =
			flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
		err = otx2_sync_mbox_msg(&pf->mbox);
		if (err)
			goto out;

		/* tx */
		del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
		if (!del_req) {
			err = -ENOMEM;
			goto out;
		}
		idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
		del_req->entry =
			flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
		err = otx2_sync_mbox_msg(&pf->mbox);

		goto out;
	}

	/* rx */
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
	req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
	req->packet.vlan_tci = htons(vlan);
	req->mask.vlan_tci = htons(VLAN_VID_MASK);
	/* af fills the destination mac addr */
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->vf = vf + 1;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err)
		goto out;

	/* tx */
	vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!vtag_req) {
		err = -ENOMEM;
		goto out;
	}

	/* configure tx vtag params */
	vtag_req->vtag_size = VTAGSIZE_T4;
	vtag_req->cfg_type = 0; /* tx vlan cfg */
	vtag_req->tx.cfg_vtag0 = 1;
	vtag_req->tx.vtag0 = ((u64)ntohs(proto) << 16) | vlan;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err)
		goto out;

	vtag_rsp = (struct nix_vtag_config_rsp *)otx2_mbox_get_rsp
			(&pf->mbox.mbox, 0, &vtag_req->hdr);
	if (IS_ERR(vtag_rsp)) {
		err = PTR_ERR(vtag_rsp);
		goto out;
	}
	config->tx_vtag_idx = vtag_rsp->vtag0_idx;

	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	eth_zero_addr((u8 *)&req->mask.dmac);
	idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
	req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.tx_chan_base;
	req->intf = NIX_INTF_TX;
	req->vf = vf + 1;
	req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT;
	req->vtag0_def = vtag_rsp->vtag0_idx;
	req->vtag0_op = VTAG_INSERT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
out:
	config->vlan = vlan;
	mutex_unlock(&pf->mbox.lock);
	return err;
}
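/* Worked example for the tx.vtag0 layout above (a sketch; the field layout
 * follows the NIX vtag config this driver uses): for proto 0x8100 and
 * vlan 5, ((u64)ntohs(proto) << 16) | vlan = (0x8100 << 16) | 5 =
 * 0x81000005, i.e. the TPID sits in bits 31:16 and the VLAN ID in the low
 * bits of the tag that hardware will insert on transmit.
 */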
static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
			    __be16 proto)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct pci_dev *pdev = pf->pdev;

	if (!netif_running(netdev))
		return -EAGAIN;

	if (vf >= pci_num_vf(pdev))
		return -EINVAL;

	/* qos is currently unsupported */
	if (vlan >= VLAN_N_VID || qos)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT))
		return -EOPNOTSUPP;

	return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto);
}
static int otx2_get_vf_config(struct net_device *netdev, int vf,
			      struct ifla_vf_info *ivi)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct pci_dev *pdev = pf->pdev;
	struct otx2_vf_config *config;

	if (!netif_running(netdev))
		return -EAGAIN;

	if (vf >= pci_num_vf(pdev))
		return -EINVAL;

	config = &pf->vf_configs[vf];
	ivi->vf = vf;
	ether_addr_copy(ivi->mac, config->mac);
	ivi->vlan = config->vlan;
	ivi->trusted = config->trusted;

	return 0;
}
static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
			    int qidx)
{
	struct page *page;
	u64 dma_addr;
	int err = 0;

	dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data),
				     offset_in_page(xdpf->data), xdpf->len,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(pf->dev, dma_addr))
		return -ENOMEM;

	err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
	if (!err) {
		otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
		page = virt_to_page(xdpf->data);
		put_page(page);
		return -ENOMEM;
	}
	return 0;
}
static int otx2_xdp_xmit(struct net_device *netdev, int n,
			 struct xdp_frame **frames, u32 flags)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	int qidx = smp_processor_id();
	struct otx2_snd_queue *sq;
	int drops = 0, i;

	if (!netif_running(netdev))
		return -ENETDOWN;

	qidx += pf->hw.tx_queues;
	sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL;

	/* Abort xmit if xdp queue is not set up */
	if (unlikely(!sq))
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = otx2_xdp_xmit_tx(pf, xdpf, qidx);
		if (err)
			drops++;
	}
	return n - drops;
}
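/* XDP TX queue selection sketch: otx2_xdp_xmit() above relies on one send
 * queue per CPU, so smp_processor_id() offset by hw.tx_queues indexes the
 * XDP-dedicated SQ region that sits right after the network stack's TX
 * queues in qset->sq[].
 */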
static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
{
	struct net_device *dev = pf->netdev;
	bool if_up = netif_running(pf->netdev);
	struct bpf_prog *old_prog;

	if (prog && dev->mtu > MAX_XDP_MTU) {
		netdev_warn(dev, "Jumbo frames not yet supported with XDP\n");
		return -EOPNOTSUPP;
	}

	if (if_up)
		otx2_stop(pf->netdev);

	old_prog = xchg(&pf->xdp_prog, prog);

	if (old_prog)
		bpf_prog_put(old_prog);

	if (pf->xdp_prog)
		bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1);

	/* Network stack and XDP shared same rx queues.
	 * Use separate tx queues for XDP and network stack.
	 */
	if (pf->xdp_prog)
		pf->hw.xdp_queues = pf->hw.rx_queues;
	else
		pf->hw.xdp_queues = 0;

	pf->hw.tot_tx_queues += pf->hw.xdp_queues;

	if (if_up)
		otx2_open(pf->netdev);

	return 0;
}
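/* Reference counting note for otx2_xdp_setup() above: xchg() takes over
 * the single reference the core handed us with the new program, and
 * bpf_prog_add() then adds rx_queues - 1 more so that each receive queue
 * can hold and drop its own reference to pf->xdp_prog independently.
 */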
static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return otx2_xdp_setup(pf, xdp->prog);
	default:
		return -EINVAL;
	}
}
static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
				   int req_perm)
{
	struct set_vf_perm *req;
	int rc;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_set_vf_perm(&pf->mbox);
	if (!req) {
		rc = -ENOMEM;
		goto out;
	}

	/* Let AF reset VF permissions as sriov is disabled */
	if (req_perm == OTX2_RESET_VF_PERM) {
		req->flags |= RESET_VF_PERM;
	} else if (req_perm == OTX2_TRUSTED_VF) {
		if (pf->vf_configs[vf].trusted)
			req->flags |= VF_TRUSTED;
	}

	req->vf = vf;
	rc = otx2_sync_mbox_msg(&pf->mbox);
out:
	mutex_unlock(&pf->mbox.lock);
	return rc;
}
static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
				 bool enable)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct pci_dev *pdev = pf->pdev;
	int rc;

	if (vf >= pci_num_vf(pdev))
		return -EINVAL;

	if (pf->vf_configs[vf].trusted == enable)
		return 0;

	pf->vf_configs[vf].trusted = enable;
	rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);

	if (rc)
		pf->vf_configs[vf].trusted = !enable;
	else
		netdev_info(pf->netdev, "VF %d is %strusted\n",
			    vf, enable ? "" : "not ");
	return rc;
}
static const struct net_device_ops otx2_netdev_ops = {
	.ndo_open		= otx2_open,
	.ndo_stop		= otx2_stop,
	.ndo_start_xmit		= otx2_xmit,
	.ndo_fix_features	= otx2_fix_features,
	.ndo_set_mac_address	= otx2_set_mac_address,
	.ndo_change_mtu		= otx2_change_mtu,
	.ndo_set_rx_mode	= otx2_set_rx_mode,
	.ndo_set_features	= otx2_set_features,
	.ndo_tx_timeout		= otx2_tx_timeout,
	.ndo_get_stats64	= otx2_get_stats64,
	.ndo_eth_ioctl		= otx2_ioctl,
	.ndo_set_vf_mac		= otx2_set_vf_mac,
	.ndo_set_vf_vlan	= otx2_set_vf_vlan,
	.ndo_get_vf_config	= otx2_get_vf_config,
	.ndo_bpf		= otx2_xdp,
	.ndo_xdp_xmit		= otx2_xdp_xmit,
	.ndo_setup_tc		= otx2_setup_tc,
	.ndo_set_vf_trust	= otx2_ndo_set_vf_trust,
};
static int otx2_wq_init(struct otx2_nic *pf)
{
	pf->otx2_wq = create_singlethread_workqueue("otx2_wq");
	if (!pf->otx2_wq)
		return -ENOMEM;

	INIT_WORK(&pf->rx_mode_work, otx2_rx_mode_wrk_handler);
	INIT_WORK(&pf->reset_task, otx2_reset_task);
	return 0;
}
static int otx2_check_pf_usable(struct otx2_nic *nic)
{
        u64 rev;

        rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
        rev = (rev >> 12) & 0xFF;
        /* Check if AF has setup revision for RVUM block,
         * otherwise this driver probe should be deferred
         * until AF driver comes up.
         */
        if (!rev) {
                dev_warn(nic->dev,
                         "AF is not initialized, deferring probe\n");
                return -EPROBE_DEFER;
        }
        return 0;
}
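/* Probe initially allocates only the RVU_PF_INT_VEC_CNT mailbox vectors;
 * the NIX MSI-X vector offset is known only after the AF has attached the
 * NIX LF, so the allocation is redone here at its final size.
 */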
static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
{
        struct otx2_hw *hw = &pf->hw;
        int num_vec, err;

        /* NPA interrupts are not registered, so allocate only
         * up to the NIX vector offset.
         */
        num_vec = hw->nix_msixoff;
        num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;

        otx2_disable_mbox_intr(pf);
        pci_free_irq_vectors(hw->pdev);
        err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
        if (err < 0) {
                dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
                        __func__, num_vec);
                return err;
        }

        return otx2_register_mbox_intr(pf, false);
}
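/* Per-VF state is sized for the SR-IOV TotalVFs capability so it can be
 * allocated once at probe time, before any VFs are actually enabled.
 */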
static int otx2_sriov_vfcfg_init(struct otx2_nic *pf)
{
        int i;

        pf->vf_configs = devm_kcalloc(pf->dev, pf->total_vfs,
                                      sizeof(struct otx2_vf_config),
                                      GFP_KERNEL);
        if (!pf->vf_configs)
                return -ENOMEM;

        for (i = 0; i < pf->total_vfs; i++) {
                pf->vf_configs[i].pf = pf;
                pf->vf_configs[i].intf_down = true;
                pf->vf_configs[i].trusted = false;
                INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
                                  otx2_vf_link_event_task);
        }

        return 0;
}
static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
{
        int i;

        if (!pf->vf_configs)
                return;

        for (i = 0; i < pf->total_vfs; i++) {
                cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
                otx2_set_vf_permissions(pf, i, OTX2_RESET_VF_PERM);
        }
}
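/* Probe order matters: PCI/BAR setup, AF mailbox, LF attach, MSI-X
 * re-allocation, then netdev registration. Each err_* label at the end
 * of the function unwinds exactly the steps completed before the failure.
 */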
static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct device *dev = &pdev->dev;
        struct net_device *netdev;
        struct otx2_nic *pf;
        struct otx2_hw *hw;
        int err, qcount;
        int num_vec;

        err = pcim_enable_device(pdev);
        if (err) {
                dev_err(dev, "Failed to enable PCI device\n");
                return err;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(dev, "PCI request regions failed 0x%x\n", err);
                return err;
        }

        err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
        if (err) {
                dev_err(dev, "DMA mask config failed, abort\n");
                goto err_release_regions;
        }

        pci_set_master(pdev);

        /* Set number of queues */
        qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);

        netdev = alloc_etherdev_mqs(sizeof(*pf), qcount, qcount);
        if (!netdev) {
                err = -ENOMEM;
                goto err_release_regions;
        }

        pci_set_drvdata(pdev, netdev);
        SET_NETDEV_DEV(netdev, &pdev->dev);
        pf = netdev_priv(netdev);
        pf->netdev = netdev;
        pf->pdev = pdev;
        pf->dev = dev;
        pf->total_vfs = pci_sriov_get_totalvfs(pdev);
        pf->flags |= OTX2_FLAG_INTF_DOWN;

        hw = &pf->hw;
        hw->pdev = pdev;
        hw->rx_queues = qcount;
        hw->tx_queues = qcount;
        hw->tot_tx_queues = qcount;
        hw->max_queues = qcount;

        num_vec = pci_msix_vec_count(pdev);
        hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
                                          GFP_KERNEL);
        if (!hw->irq_name) {
                err = -ENOMEM;
                goto err_free_netdev;
        }

        hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
                                         sizeof(cpumask_var_t), GFP_KERNEL);
        if (!hw->affinity_mask) {
                err = -ENOMEM;
                goto err_free_netdev;
        }

        /* Map CSRs */
        pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
        if (!pf->reg_base) {
                dev_err(dev, "Unable to map physical function CSRs, aborting\n");
                err = -ENOMEM;
                goto err_free_netdev;
        }

        err = otx2_check_pf_usable(pf);
        if (err)
                goto err_free_netdev;

        err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
                                    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
        if (err < 0) {
                dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
                        __func__, RVU_PF_INT_VEC_CNT);
                goto err_free_netdev;
        }

        otx2_setup_dev_hw_settings(pf);
        /* Init PF <=> AF mailbox stuff */
        err = otx2_pfaf_mbox_init(pf);
        if (err)
                goto err_free_irq_vectors;

        /* Register mailbox interrupt */
        err = otx2_register_mbox_intr(pf, true);
        if (err)
                goto err_mbox_destroy;

        /* Request AF to attach NPA and NIX LFs to this PF.
         * NIX and NPA LFs are needed for this PF to function as a NIC.
         */
        err = otx2_attach_npa_nix(pf);
        if (err)
                goto err_disable_mbox_intr;

        err = otx2_realloc_msix_vectors(pf);
        if (err)
                goto err_detach_rsrc;

        err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
        if (err)
                goto err_detach_rsrc;

        err = cn10k_lmtst_init(pf);
        if (err)
                goto err_detach_rsrc;

        /* Assign default mac address */
        otx2_get_mac_from_af(netdev);

        /* Don't check for error; proceed without PTP if it fails */
        otx2_ptp_init(pf);
        /* NPA's pool is a stack to which SW frees buffer pointers via Aura.
         * HW allocates buffer pointer from stack and uses it for DMA'ing
         * ingress packet. In some scenarios HW can free back allocated buffer
         * pointers to pool. This makes it impossible for SW to maintain a
         * parallel list where physical addresses of buffer pointers (IOVAs)
         * given to HW can be saved for later reference.
         *
         * So the only way to convert an Rx packet's buffer address is to use
         * IOMMU's iova_to_phys() handler, which translates the address by
         * walking through the translation tables.
         */
        pf->iommu_domain = iommu_get_domain_for_dev(dev);
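        /* The Rx path is then expected to translate roughly as in this
         * sketch (see otx2_iova_to_phys() in otx2_common.h):
         *
         *   pa = iommu_iova_to_phys(pf->iommu_domain, iova);
         *   va = phys_to_virt(pa);
         *
         * falling back to treating the IOVA as a physical address when no
         * IOMMU domain is attached to the device.
         */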
        netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
                               NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
                               NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                               NETIF_F_GSO_UDP_L4);
        netdev->features |= netdev->hw_features;

        err = otx2_mcam_flow_init(pf);
        if (err)
                goto err_ptp_destroy;

        if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
                netdev->hw_features |= NETIF_F_NTUPLE;

        if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)
                netdev->priv_flags |= IFF_UNICAST_FLT;

        /* Support TSO on tag interface */
        netdev->vlan_features |= netdev->features;
        netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
                               NETIF_F_HW_VLAN_STAG_TX;
        if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
                netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
                                       NETIF_F_HW_VLAN_STAG_RX;
        netdev->features |= netdev->hw_features;

        /* HW supports tc offload but it is mutually exclusive with n-tuple filters */
        if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)
                netdev->hw_features |= NETIF_F_HW_TC;

        netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;

        netif_set_gso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
        netdev->watchdog_timeo = OTX2_TX_TIMEOUT;

        netdev->netdev_ops = &otx2_netdev_ops;

        netdev->min_mtu = OTX2_MIN_MTU;
        netdev->max_mtu = otx2_get_max_mtu(pf);

        err = register_netdev(netdev);
        if (err) {
                dev_err(dev, "Failed to register netdevice\n");
                goto err_del_mcam_entries;
        }

        err = otx2_wq_init(pf);
        if (err)
                goto err_unreg_netdev;

        otx2_set_ethtool_ops(netdev);

        err = otx2_init_tc(pf);
        if (err)
                goto err_mcam_flow_del;

        err = otx2_register_dl(pf);
        if (err)
                goto err_mcam_flow_del;

        /* Initialize SR-IOV resources */
        err = otx2_sriov_vfcfg_init(pf);
        if (err)
                goto err_pf_sriov_init;

        /* Enable link notifications */
        otx2_cgx_config_linkevents(pf, true);

        /* Enable pause frames by default */
        pf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
        pf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;

        return 0;
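        /* Failure unwind: the labels below run in reverse order of the
         * setup above, so a goto to any label releases everything that
         * was acquired before the failing step.
         */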
err_pf_sriov_init:
        otx2_shutdown_tc(pf);
err_mcam_flow_del:
        otx2_mcam_flow_del(pf);
err_unreg_netdev:
        unregister_netdev(netdev);
err_del_mcam_entries:
        otx2_mcam_flow_del(pf);
err_ptp_destroy:
        otx2_ptp_destroy(pf);
err_detach_rsrc:
        if (pf->hw.lmt_info)
                free_percpu(pf->hw.lmt_info);
        if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
                qmem_free(pf->dev, pf->dync_lmt);
        otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
        otx2_disable_mbox_intr(pf);
err_mbox_destroy:
        otx2_pfaf_mbox_destroy(pf);
err_free_irq_vectors:
        pci_free_irq_vectors(hw->pdev);
err_free_netdev:
        pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
err_release_regions:
        pci_release_regions(pdev);
        return err;
}
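/* Deferred work: replays the PF's last known link state to one VF over the
 * PF->VF "up" mailbox direction, so a VF that comes up after a link event
 * still receives the current state.
 */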
static void otx2_vf_link_event_task(struct work_struct *work)
{
        struct otx2_vf_config *config;
        struct cgx_link_info_msg *req;
        struct mbox_msghdr *msghdr;
        struct otx2_nic *pf;
        int vf_idx;

        config = container_of(work, struct otx2_vf_config,
                              link_event_work.work);
        vf_idx = config - config->pf->vf_configs;
        pf = config->pf;

        msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
                                         sizeof(*req), sizeof(struct msg_rsp));
        if (!msghdr) {
                dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
                return;
        }

        req = (struct cgx_link_info_msg *)msghdr;
        req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
        req->hdr.sig = OTX2_MBOX_REQ_SIG;
        memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));

        otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
}
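/* SR-IOV enable/disable is reached via the sriov_configure callback,
 * i.e. from sysfs:
 *
 *   echo <numvfs> > /sys/bus/pci/devices/<BDF>/sriov_numvfs
 *
 * PF<->VF mailboxes and FLR handling must be ready before
 * pci_enable_sriov() lets the VFs probe; on success the PCI core expects
 * the number of enabled VFs as the return value.
 */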
static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct otx2_nic *pf = netdev_priv(netdev);
        int ret;

        /* Init PF <=> VF mailbox stuff */
        ret = otx2_pfvf_mbox_init(pf, numvfs);
        if (ret)
                return ret;

        ret = otx2_register_pfvf_mbox_intr(pf, numvfs);
        if (ret)
                goto free_mbox;

        ret = otx2_pf_flr_init(pf, numvfs);
        if (ret)
                goto free_intr;

        ret = otx2_register_flr_me_intr(pf, numvfs);
        if (ret)
                goto free_flr;

        ret = pci_enable_sriov(pdev, numvfs);
        if (ret)
                goto free_flr_intr;

        return numvfs;
free_flr_intr:
        otx2_disable_flr_me_intr(pf);
free_flr:
        otx2_flr_wq_destroy(pf);
free_intr:
        otx2_disable_pfvf_mbox_intr(pf, numvfs);
free_mbox:
        otx2_pfvf_mbox_destroy(pf);
        return ret;
}
static int otx2_sriov_disable(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct otx2_nic *pf = netdev_priv(netdev);
        int numvfs = pci_num_vf(pdev);

        if (!numvfs)
                return 0;

        pci_disable_sriov(pdev);

        otx2_disable_flr_me_intr(pf);
        otx2_flr_wq_destroy(pf);
        otx2_disable_pfvf_mbox_intr(pf, numvfs);
        otx2_pfvf_mbox_destroy(pf);

        return 0;
}
static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
{
        if (numvfs == 0)
                return otx2_sriov_disable(pdev);
        else
                return otx2_sriov_enable(pdev, numvfs);
}
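/* Device teardown; also wired up as the PCI shutdown handler, so it must
 * quiesce interrupts and HW state, not just free software resources.
 */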
static void otx2_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct otx2_nic *pf;

        if (!netdev)
                return;

        pf = netdev_priv(netdev);

        pf->flags |= OTX2_FLAG_PF_SHUTDOWN;

        if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
                otx2_config_hw_tx_tstamp(pf, false);
        if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
                otx2_config_hw_rx_tstamp(pf, false);

        cancel_work_sync(&pf->reset_task);
        /* Disable link notifications */
        otx2_cgx_config_linkevents(pf, false);

        otx2_unregister_dl(pf);
        unregister_netdev(netdev);
        otx2_sriov_disable(pf->pdev);
        otx2_sriov_vfcfg_cleanup(pf);
        if (pf->otx2_wq)
                destroy_workqueue(pf->otx2_wq);
        otx2_ptp_destroy(pf);
        otx2_mcam_flow_del(pf);
        otx2_shutdown_tc(pf);
        otx2_detach_resources(&pf->mbox);
        if (pf->hw.lmt_info)
                free_percpu(pf->hw.lmt_info);
        if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
                qmem_free(pf->dev, pf->dync_lmt);
        otx2_disable_mbox_intr(pf);
        otx2_pfaf_mbox_destroy(pf);
        pci_free_irq_vectors(pf->pdev);
        pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);

        pci_release_regions(pdev);
}
static struct pci_driver otx2_pf_driver = {
        .name = DRV_NAME,
        .id_table = otx2_pf_id_table,
        .probe = otx2_probe,
        .shutdown = otx2_remove,
        .remove = otx2_remove,
        .sriov_configure = otx2_sriov_configure
};
static int __init otx2_rvupf_init_module(void)
{
        pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

        return pci_register_driver(&otx2_pf_driver);
}

static void __exit otx2_rvupf_cleanup_module(void)
{
        pci_unregister_driver(&otx2_pf_driver);
}

module_init(otx2_rvupf_init_module);
module_exit(otx2_rvupf_cleanup_module);