1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Physical Function ethernet driver
4 * Copyright (C) 2020 Marvell.
8 #include <linux/module.h>
9 #include <linux/interrupt.h>
10 #include <linux/pci.h>
11 #include <linux/etherdevice.h>
13 #include <linux/if_vlan.h>
14 #include <linux/iommu.h>
18 #include "otx2_common.h"
19 #include "otx2_txrx.h"
20 #include "otx2_struct.h"
23 #include <rvu_trace.h>
25 #define DRV_NAME "rvu_nicpf"
26 #define DRV_STRING "Marvell RVU NIC Physical Function Driver"
28 /* Supported devices */
29 static const struct pci_device_id otx2_pf_id_table[] = {
30 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) },
31 { 0, } /* end of table */
34 MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
35 MODULE_DESCRIPTION(DRV_STRING);
36 MODULE_LICENSE("GPL v2");
37 MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
39 static void otx2_vf_link_event_task(struct work_struct *work);
46 static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable);
47 static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
49 static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
51 bool if_up = netif_running(netdev);
57 netdev_info(netdev, "Changing MTU from %d to %d\n",
58 netdev->mtu, new_mtu);
59 netdev->mtu = new_mtu;
62 err = otx2_open(netdev);
67 static void otx2_disable_flr_me_intr(struct otx2_nic *pf)
69 int irq, vfs = pf->total_vfs;
71 /* Disable VFs ME interrupts */
72 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
73 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0);
76 /* Disable VFs FLR interrupts */
77 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
78 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0);
84 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
85 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1);
88 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
89 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1);
93 static void otx2_flr_wq_destroy(struct otx2_nic *pf)
97 destroy_workqueue(pf->flr_wq);
99 devm_kfree(pf->dev, pf->flr_wrk);
102 static void otx2_flr_handler(struct work_struct *work)
104 struct flr_work *flrwork = container_of(work, struct flr_work, work);
105 struct otx2_nic *pf = flrwork->pf;
106 struct mbox *mbox = &pf->mbox;
110 vf = flrwork - pf->flr_wrk;
112 mutex_lock(&mbox->lock);
113 req = otx2_mbox_alloc_msg_vf_flr(mbox);
115 mutex_unlock(&mbox->lock);
118 req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK;
119 req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
121 if (!otx2_sync_mbox_msg(&pf->mbox)) {
126 /* clear transaction pending bit */
127 otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
128 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
131 mutex_unlock(&mbox->lock);
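/* PF interrupt handler for VF FLR events: for every VF whose bit is set
 * in the FLR interrupt register, queue its flr_work item and mask the
 * interrupt; the work item (otx2_flr_handler() above) completes the FLR
 * handshake with the AF and re-enables the interrupt.
 */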
134 static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq)
136 struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
137 int reg, dev, vf, start_vf, num_reg = 1;
140 if (pf->total_vfs > 64)
143 for (reg = 0; reg < num_reg; reg++) {
144 intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg));
148 for (vf = 0; vf < 64; vf++) {
149 if (!(intr & BIT_ULL(vf)))
152 queue_work(pf->flr_wq, &pf->flr_wrk[dev].work);
153 /* Clear interrupt */
154 otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
155 /* Disable the interrupt */
156 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg),
163 static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq)
165 struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
166 int vf, reg, num_reg = 1;
169 if (pf->total_vfs > 64)
172 for (reg = 0; reg < num_reg; reg++) {
173 intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg));
176 for (vf = 0; vf < 64; vf++) {
177 if (!(intr & BIT_ULL(vf)))
179 /* clear trpend bit */
180 otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
181 /* clear interrupt */
182 otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
188 static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
190 struct otx2_hw *hw = &pf->hw;
194 /* Register ME interrupt handler */
195 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
196 snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
197 ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
198 otx2_pf_me_intr_handler, 0, irq_name, pf);
201 "RVUPF: IRQ registration failed for ME0\n");
204 /* Register FLR interrupt handler */
205 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
206 snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
207 ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
208 otx2_pf_flr_intr_handler, 0, irq_name, pf);
211 "RVUPF: IRQ registration failed for FLR0\n");
216 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
217 snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
218 rvu_get_pf(pf->pcifunc));
219 ret = request_irq(pci_irq_vector
220 (pf->pdev, RVU_PF_INT_VEC_VFME1),
221 otx2_pf_me_intr_handler, 0, irq_name, pf);
224 "RVUPF: IRQ registration failed for ME1\n");
226 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
227 snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
228 rvu_get_pf(pf->pcifunc));
229 ret = request_irq(pci_irq_vector
230 (pf->pdev, RVU_PF_INT_VEC_VFFLR1),
231 otx2_pf_flr_intr_handler, 0, irq_name, pf);
234 "RVUPF: IRQ registration failed for FLR1\n");
239 /* Enable ME interrupt for all VFs */
240 otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs));
241 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs));
243 /* Enable FLR interrupt for all VFs */
244 otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs));
245 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs));
250 otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs));
251 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1),
254 otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs));
255 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1),
261 static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
265 pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq",
266 WQ_UNBOUND | WQ_HIGHPRI, 1);
270 pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs,
271 sizeof(struct flr_work), GFP_KERNEL);
273 destroy_workqueue(pf->flr_wq);
277 for (vf = 0; vf < num_vfs; vf++) {
278 pf->flr_wrk[vf].pf = pf;
279 INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler);
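/* Common helper for the AF->PF and VF->PF mailbox interrupts: for every
 * mbox device whose bit is set in 'intr', latch the number of pending
 * messages from the mailbox header into the per-device mbox state and
 * queue the corresponding down/up work item on 'mbox_wq'.
 */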
285 static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
286 int first, int mdevs, u64 intr, int type)
288 struct otx2_mbox_dev *mdev;
289 struct otx2_mbox *mbox;
290 struct mbox_hdr *hdr;
293 for (i = first; i < mdevs; i++) {
295 if (!(intr & BIT_ULL(i - first)))
299 mdev = &mbox->dev[i];
300 if (type == TYPE_PFAF)
301 otx2_sync_mbox_bbuf(mbox, i);
302 hdr = mdev->mbase + mbox->rx_start;
303 /* hdr->num_msgs is set to zero immediately in the interrupt
304 * handler so that it holds a correct value the next time the
305 * interrupt handler is called.
306 * pf->mbox.num_msgs holds the count for use in pfaf_mbox_handler
307 * and pf->mbox.up_num_msgs holds the count for use in
308 * pfaf_mbox_up_handler.
311 mw[i].num_msgs = hdr->num_msgs;
313 if (type == TYPE_PFAF)
314 memset(mbox->hwbase + mbox->rx_start, 0,
315 ALIGN(sizeof(struct mbox_hdr),
318 queue_work(mbox_wq, &mw[i].mbox_wrk);
322 mdev = &mbox->dev[i];
323 if (type == TYPE_PFAF)
324 otx2_sync_mbox_bbuf(mbox, i);
325 hdr = mdev->mbase + mbox->rx_start;
327 mw[i].up_num_msgs = hdr->num_msgs;
329 if (type == TYPE_PFAF)
330 memset(mbox->hwbase + mbox->rx_start, 0,
331 ALIGN(sizeof(struct mbox_hdr),
334 queue_work(mbox_wq, &mw[i].mbox_up_wrk);
339 static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
340 struct otx2_mbox *pfvf_mbox, void *bbuf_base,
343 struct otx2_mbox_dev *src_mdev = mdev;
346 /* Msgs are already copied, trigger VF's mbox irq */
349 offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
350 writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);
352 /* Restore VF's mbox bounce buffer region address */
353 src_mdev->mbase = bbuf_base;
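/* Forward mailbox messages between a VF and the AF without copying them:
 * the destination mailbox's bounce buffer pointer is temporarily aliased
 * to the source region, the messages are synced, and an interrupt is then
 * raised towards the final receiver (AF, VF or PF depending on 'dir').
 */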
356 static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
357 struct otx2_mbox *src_mbox,
358 int dir, int vf, int num_msgs)
360 struct otx2_mbox_dev *src_mdev, *dst_mdev;
361 struct mbox_hdr *mbox_hdr;
362 struct mbox_hdr *req_hdr;
363 struct mbox *dst_mbox;
366 if (dir == MBOX_DIR_PFAF) {
367 /* Set VF's mailbox memory as PF's bounce buffer memory, so
368 * that explicit copying of VF's msgs to PF=>AF mbox region
369 * and AF=>PF responses to VF's mbox region can be avoided.
371 src_mdev = &src_mbox->dev[vf];
372 mbox_hdr = src_mbox->hwbase +
373 src_mbox->rx_start + (vf * MBOX_SIZE);
375 dst_mbox = &pf->mbox;
376 dst_size = dst_mbox->mbox.tx_size -
377 ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
378 /* Check if msgs fit into destination area and have a valid size */
379 if (mbox_hdr->msg_size > dst_size || !mbox_hdr->msg_size)
382 dst_mdev = &dst_mbox->mbox.dev[0];
384 mutex_lock(&pf->mbox.lock);
385 dst_mdev->mbase = src_mdev->mbase;
386 dst_mdev->msg_size = mbox_hdr->msg_size;
387 dst_mdev->num_msgs = num_msgs;
388 err = otx2_sync_mbox_msg(dst_mbox);
391 "AF not responding to VF%d messages\n", vf);
392 /* restore PF mbase and exit */
393 dst_mdev->mbase = pf->mbox.bbuf_base;
394 mutex_unlock(&pf->mbox.lock);
397 /* At this point, all the VF messages sent to AF are acked
398 * with proper responses and the responses are copied to the VF
399 * mailbox, hence raise an interrupt to the VF.
401 req_hdr = (struct mbox_hdr *)(dst_mdev->mbase +
402 dst_mbox->mbox.rx_start);
403 req_hdr->num_msgs = num_msgs;
405 otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox,
406 pf->mbox.bbuf_base, vf);
407 mutex_unlock(&pf->mbox.lock);
408 } else if (dir == MBOX_DIR_PFVF_UP) {
409 src_mdev = &src_mbox->dev[0];
410 mbox_hdr = src_mbox->hwbase + src_mbox->rx_start;
411 req_hdr = (struct mbox_hdr *)(src_mdev->mbase +
413 req_hdr->num_msgs = num_msgs;
415 dst_mbox = &pf->mbox_pfvf[0];
416 dst_size = dst_mbox->mbox_up.tx_size -
417 ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
418 /* Check if msgs fit into destination area */
419 if (mbox_hdr->msg_size > dst_size)
422 dst_mdev = &dst_mbox->mbox_up.dev[vf];
423 dst_mdev->mbase = src_mdev->mbase;
424 dst_mdev->msg_size = mbox_hdr->msg_size;
425 dst_mdev->num_msgs = mbox_hdr->num_msgs;
426 err = otx2_sync_mbox_up_msg(dst_mbox, vf);
429 "VF%d is not responding to mailbox\n", vf);
432 } else if (dir == MBOX_DIR_VFPF_UP) {
433 req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase +
435 req_hdr->num_msgs = num_msgs;
436 otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf],
438 pf->mbox_pfvf[vf].bbuf_base,
445 static void otx2_pfvf_mbox_handler(struct work_struct *work)
447 struct mbox_msghdr *msg = NULL;
448 int offset, vf_idx, id, err;
449 struct otx2_mbox_dev *mdev;
450 struct mbox_hdr *req_hdr;
451 struct otx2_mbox *mbox;
452 struct mbox *vf_mbox;
455 vf_mbox = container_of(work, struct mbox, mbox_wrk);
457 vf_idx = vf_mbox - pf->mbox_pfvf;
459 mbox = &pf->mbox_pfvf[0].mbox;
460 mdev = &mbox->dev[vf_idx];
461 req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
463 offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
465 for (id = 0; id < vf_mbox->num_msgs; id++) {
466 msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
469 if (msg->sig != OTX2_MBOX_REQ_SIG)
472 /* Set VF's number in each of the msgs */
473 msg->pcifunc &= RVU_PFVF_FUNC_MASK;
474 msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
475 offset = msg->next_msgoff;
477 err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx,
484 otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id);
485 otx2_mbox_msg_send(mbox, vf_idx);
488 static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
490 struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
491 struct otx2_nic *pf = vf_mbox->pfvf;
492 struct otx2_mbox_dev *mdev;
493 int offset, id, vf_idx = 0;
494 struct mbox_hdr *rsp_hdr;
495 struct mbox_msghdr *msg;
496 struct otx2_mbox *mbox;
498 vf_idx = vf_mbox - pf->mbox_pfvf;
499 mbox = &pf->mbox_pfvf[0].mbox_up;
500 mdev = &mbox->dev[vf_idx];
502 rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
503 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
505 for (id = 0; id < vf_mbox->up_num_msgs; id++) {
506 msg = mdev->mbase + offset;
508 if (msg->id >= MBOX_MSG_MAX) {
510 "Mbox msg with unknown ID 0x%x\n", msg->id);
514 if (msg->sig != OTX2_MBOX_RSP_SIG) {
516 "Mbox msg with wrong signature %x, ID 0x%x\n",
522 case MBOX_MSG_CGX_LINK_EVENT:
527 "Mbox msg response has err %d, ID 0x%x\n",
533 offset = mbox->rx_start + msg->next_msgoff;
534 if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
535 __otx2_mbox_reset(mbox, 0);
540 static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
542 struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
543 int vfs = pf->total_vfs;
547 mbox = pf->mbox_pfvf;
548 /* Handle VF interrupts */
550 intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
551 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
552 otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
557 intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
558 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
560 otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
562 trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
567 static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
569 void __iomem *hwbase;
577 pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs,
578 sizeof(struct mbox), GFP_KERNEL);
582 pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
583 WQ_UNBOUND | WQ_HIGHPRI |
585 if (!pf->mbox_pfvf_wq)
588 /* On CN10K platform, PF <-> VF mailbox region follows after
589 * PF <-> AF mailbox region.
591 if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
592 base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
595 base = readq((void __iomem *)((u64)pf->reg_base +
596 RVU_PF_VF_BAR4_ADDR));
598 hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
604 mbox = &pf->mbox_pfvf[0];
605 err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
606 MBOX_DIR_PFVF, numvfs);
610 err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
611 MBOX_DIR_PFVF_UP, numvfs);
615 for (vf = 0; vf < numvfs; vf++) {
617 INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler);
618 INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler);
628 destroy_workqueue(pf->mbox_pfvf_wq);
632 static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
634 struct mbox *mbox = &pf->mbox_pfvf[0];
639 if (pf->mbox_pfvf_wq) {
640 destroy_workqueue(pf->mbox_pfvf_wq);
641 pf->mbox_pfvf_wq = NULL;
644 if (mbox->mbox.hwbase)
645 iounmap(mbox->mbox.hwbase);
647 otx2_mbox_destroy(&mbox->mbox);
650 static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
652 /* Clear PF <=> VF mailbox IRQ */
653 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
654 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
656 /* Enable PF <=> VF mailbox IRQ */
657 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs));
660 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
665 static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
669 /* Disable PF <=> VF mailbox IRQ */
670 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
671 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);
673 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
674 vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
675 free_irq(vector, pf);
678 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
679 vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
680 free_irq(vector, pf);
684 static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
686 struct otx2_hw *hw = &pf->hw;
690 /* Register MBOX0 interrupt handler */
691 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
693 snprintf(irq_name, NAME_SIZE,
694 "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
696 snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
697 err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
698 otx2_pfvf_mbox_intr_handler, 0, irq_name, pf);
701 "RVUPF: IRQ registration failed for PFVF mbox0 irq\n");
706 /* Register MBOX1 interrupt handler */
707 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
709 snprintf(irq_name, NAME_SIZE,
710 "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
712 snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
713 err = request_irq(pci_irq_vector(pf->pdev,
714 RVU_PF_INT_VEC_VFPF_MBOX1),
715 otx2_pfvf_mbox_intr_handler,
719 "RVUPF: IRQ registration failed for PFVF mbox1 irq\n");
724 otx2_enable_pfvf_mbox_intr(pf, numvfs);
729 static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
730 struct mbox_msghdr *msg)
734 if (msg->id >= MBOX_MSG_MAX) {
736 "Mbox msg with unknown ID 0x%x\n", msg->id);
740 if (msg->sig != OTX2_MBOX_RSP_SIG) {
742 "Mbox msg with wrong signature %x, ID 0x%x\n",
747 /* message response is destined for a VF */
748 devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
750 struct otx2_vf_config *config = &pf->vf_configs[devid - 1];
751 struct delayed_work *dwork;
754 case MBOX_MSG_NIX_LF_START_RX:
755 config->intf_down = false;
756 dwork = &config->link_event_work;
757 schedule_delayed_work(dwork, msecs_to_jiffies(100));
759 case MBOX_MSG_NIX_LF_STOP_RX:
760 config->intf_down = true;
769 pf->pcifunc = msg->pcifunc;
771 case MBOX_MSG_MSIX_OFFSET:
772 mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg);
774 case MBOX_MSG_NPA_LF_ALLOC:
775 mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg);
777 case MBOX_MSG_NIX_LF_ALLOC:
778 mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg);
780 case MBOX_MSG_NIX_TXSCH_ALLOC:
781 mbox_handler_nix_txsch_alloc(pf,
782 (struct nix_txsch_alloc_rsp *)msg);
784 case MBOX_MSG_NIX_BP_ENABLE:
785 mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
787 case MBOX_MSG_CGX_STATS:
788 mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
790 case MBOX_MSG_CGX_FEC_STATS:
791 mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg);
796 "Mbox msg response has err %d, ID 0x%x\n",
802 static void otx2_pfaf_mbox_handler(struct work_struct *work)
804 struct otx2_mbox_dev *mdev;
805 struct mbox_hdr *rsp_hdr;
806 struct mbox_msghdr *msg;
807 struct otx2_mbox *mbox;
808 struct mbox *af_mbox;
812 af_mbox = container_of(work, struct mbox, mbox_wrk);
813 mbox = &af_mbox->mbox;
814 mdev = &mbox->dev[0];
815 rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
817 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
820 for (id = 0; id < af_mbox->num_msgs; id++) {
821 msg = (struct mbox_msghdr *)(mdev->mbase + offset);
822 otx2_process_pfaf_mbox_msg(pf, msg);
823 offset = mbox->rx_start + msg->next_msgoff;
824 if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
825 __otx2_mbox_reset(mbox, 0);
831 static void otx2_handle_link_event(struct otx2_nic *pf)
833 struct cgx_link_user_info *linfo = &pf->linfo;
834 struct net_device *netdev = pf->netdev;
836 pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
837 linfo->link_up ? "UP" : "DOWN", linfo->speed,
838 linfo->full_duplex ? "Full" : "Half");
839 if (linfo->link_up) {
840 netif_carrier_on(netdev);
841 netif_tx_start_all_queues(netdev);
843 netif_tx_stop_all_queues(netdev);
844 netif_carrier_off(netdev);
848 int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
849 struct cgx_link_info_msg *msg,
854 /* Copy the link info sent by AF */
855 pf->linfo = msg->link_info;
857 /* notify VFs about link event */
858 for (i = 0; i < pci_num_vf(pf->pdev); i++) {
859 struct otx2_vf_config *config = &pf->vf_configs[i];
860 struct delayed_work *dwork = &config->link_event_work;
862 if (config->intf_down)
865 schedule_delayed_work(dwork, msecs_to_jiffies(100));
868 /* interface has not been fully configured yet */
869 if (pf->flags & OTX2_FLAG_INTF_DOWN)
872 otx2_handle_link_event(pf);
876 static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
877 struct mbox_msghdr *req)
879 /* Check if valid, if not reply with an invalid msg */
880 if (req->sig != OTX2_MBOX_REQ_SIG) {
881 otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
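/* For every supported up-message, allocate a response of the matching
 * size in the PF->AF up mailbox, stamp its header and dispatch to the
 * corresponding otx2_mbox_up_handler_*() routine.
 */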
886 #define M(_name, _id, _fn_name, _req_type, _rsp_type) \
888 struct _rsp_type *rsp; \
891 rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
892 &pf->mbox.mbox_up, 0, \
893 sizeof(struct _rsp_type)); \
898 rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
899 rsp->hdr.pcifunc = 0; \
902 err = otx2_mbox_up_handler_ ## _fn_name( \
903 pf, (struct _req_type *)req, rsp); \
910 otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
916 static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
918 struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk);
919 struct otx2_mbox *mbox = &af_mbox->mbox_up;
920 struct otx2_mbox_dev *mdev = &mbox->dev[0];
921 struct otx2_nic *pf = af_mbox->pfvf;
922 int offset, id, devid = 0;
923 struct mbox_hdr *rsp_hdr;
924 struct mbox_msghdr *msg;
926 rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
928 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
930 for (id = 0; id < af_mbox->up_num_msgs; id++) {
931 msg = (struct mbox_msghdr *)(mdev->mbase + offset);
933 devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
934 /* Skip processing VF's messages */
936 otx2_process_mbox_msg_up(pf, msg);
937 offset = mbox->rx_start + msg->next_msgoff;
940 otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
941 MBOX_DIR_PFVF_UP, devid - 1,
942 af_mbox->up_num_msgs);
946 otx2_mbox_msg_send(mbox, 0);
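/* AF->PF mailbox interrupt handler: clear the interrupt and queue the
 * PF-AF mailbox work so that message processing happens in process
 * context.
 */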
949 static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
951 struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
955 otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
959 trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0));
961 otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
966 static void otx2_disable_mbox_intr(struct otx2_nic *pf)
968 int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
970 /* Disable AF => PF mailbox IRQ */
971 otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
972 free_irq(vector, pf);
975 static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
977 struct otx2_hw *hw = &pf->hw;
982 /* Register mailbox interrupt handler */
983 irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
984 snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
985 err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
986 otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
989 "RVUPF: IRQ registration failed for PFAF mbox irq\n");
993 /* Enable mailbox interrupt for msgs coming from AF.
994 * First clear to avoid spurious interrupts, if any.
996 otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
997 otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
1002 /* Check mailbox communication with AF */
1003 req = otx2_mbox_alloc_msg_ready(&pf->mbox);
1005 otx2_disable_mbox_intr(pf);
1008 err = otx2_sync_mbox_msg(&pf->mbox);
1011 "AF not responding to mailbox, deferring probe\n");
1012 otx2_disable_mbox_intr(pf);
1013 return -EPROBE_DEFER;
1019 static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
1021 struct mbox *mbox = &pf->mbox;
1024 destroy_workqueue(pf->mbox_wq);
1028 if (mbox->mbox.hwbase)
1029 iounmap((void __iomem *)mbox->mbox.hwbase);
1031 otx2_mbox_destroy(&mbox->mbox);
1032 otx2_mbox_destroy(&mbox->mbox_up);
1035 static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
1037 struct mbox *mbox = &pf->mbox;
1038 void __iomem *hwbase;
1042 pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox",
1043 WQ_UNBOUND | WQ_HIGHPRI |
1048 /* Mailbox is a reserved memory (in RAM) region shared between
1049 * the admin function (i.e. AF) and this PF; it must not be mapped as
1050 * device memory so that unaligned accesses are allowed.
1052 hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
1055 dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
1060 err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
1065 err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
1066 MBOX_DIR_PFAF_UP, 1);
1070 err = otx2_mbox_bbuf_init(mbox, pf->pdev);
1074 INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
1075 INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
1076 mutex_init(&mbox->lock);
1080 otx2_pfaf_mbox_destroy(pf);
1084 static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
1086 struct msg_req *msg;
1089 mutex_lock(&pf->mbox.lock);
1091 msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
1093 msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);
1096 mutex_unlock(&pf->mbox.lock);
1100 err = otx2_sync_mbox_msg(&pf->mbox);
1101 mutex_unlock(&pf->mbox.lock);
1105 static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
1107 struct msg_req *msg;
1110 if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
1111 pf->flow_cfg->dmacflt_max_flows))
1112 netdev_warn(pf->netdev,
1113 "CGX/RPM internal loopback might not work as DMAC filters are active\n");
1115 mutex_lock(&pf->mbox.lock);
1117 msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
1119 msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);
1122 mutex_unlock(&pf->mbox.lock);
1126 err = otx2_sync_mbox_msg(&pf->mbox);
1127 mutex_unlock(&pf->mbox.lock);
1131 int otx2_set_real_num_queues(struct net_device *netdev,
1132 int tx_queues, int rx_queues)
1136 err = netif_set_real_num_tx_queues(netdev, tx_queues);
1139 "Failed to set no of Tx queues: %d\n", tx_queues);
1143 err = netif_set_real_num_rx_queues(netdev, rx_queues);
1146 "Failed to set no of Rx queues: %d\n", rx_queues);
1149 EXPORT_SYMBOL(otx2_set_real_num_queues);
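/* Queue error (QINT) interrupt handler: read the CQ and SQ error
 * interrupt status via the atomic op registers, log the specific error
 * cause and schedule reset_task to recover from fatal errors.
 */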
1151 static irqreturn_t otx2_q_intr_handler(int irq, void *data)
1153 struct otx2_nic *pf = data;
1158 for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
1159 ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
1160 val = otx2_atomic64_add((qidx << 44), ptr);
1162 otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
1163 (val & NIX_CQERRINT_BITS));
1164 if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42))))
1167 if (val & BIT_ULL(42)) {
1168 netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
1169 qidx, otx2_read64(pf, NIX_LF_ERR_INT));
1171 if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
1172 netdev_err(pf->netdev, "CQ%lld: Doorbell error",
1174 if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
1175 netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
1179 schedule_work(&pf->reset_task);
1183 for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
1184 ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
1185 val = otx2_atomic64_add((qidx << 44), ptr);
1186 otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
1187 (val & NIX_SQINT_BITS));
1189 if (!(val & (NIX_SQINT_BITS | BIT_ULL(42))))
1192 if (val & BIT_ULL(42)) {
1193 netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
1194 qidx, otx2_read64(pf, NIX_LF_ERR_INT));
1196 if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) {
1197 netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx",
1200 NIX_LF_SQ_OP_ERR_DBG));
1201 otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG,
1204 if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
1205 netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DBG:0x%llx\n",
1207 otx2_read64(pf, NIX_LF_MNQ_ERR_DBG));
1208 otx2_write64(pf, NIX_LF_MNQ_ERR_DBG,
1211 if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) {
1212 netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx",
1215 NIX_LF_SEND_ERR_DBG));
1216 otx2_write64(pf, NIX_LF_SEND_ERR_DBG,
1219 if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
1220 netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
1224 schedule_work(&pf->reset_task);
1230 static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
1232 struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
1233 struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
1234 int qidx = cq_poll->cint_idx;
1236 /* Disable interrupts.
1238 * Completion interrupts behave in a level-triggered interrupt
1239 * fashion, and hence have to be cleared only after they are serviced.
1241 otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
1244 napi_schedule_irqoff(&cq_poll->napi);
1249 static void otx2_disable_napi(struct otx2_nic *pf)
1251 struct otx2_qset *qset = &pf->qset;
1252 struct otx2_cq_poll *cq_poll;
1255 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1256 cq_poll = &qset->napi[qidx];
1257 napi_disable(&cq_poll->napi);
1258 netif_napi_del(&cq_poll->napi);
1262 static void otx2_free_cq_res(struct otx2_nic *pf)
1264 struct otx2_qset *qset = &pf->qset;
1265 struct otx2_cq_queue *cq;
1269 otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false);
1270 for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
1271 cq = &qset->cq[qidx];
1272 qmem_free(pf->dev, cq->cqe);
1276 static void otx2_free_sq_res(struct otx2_nic *pf)
1278 struct otx2_qset *qset = &pf->qset;
1279 struct otx2_snd_queue *sq;
1283 otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
1284 /* Free SQB pointers */
1285 otx2_sq_free_sqbs(pf);
1286 for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
1287 sq = &qset->sq[qidx];
1288 qmem_free(pf->dev, sq->sqe);
1289 qmem_free(pf->dev, sq->tso_hdrs);
1291 kfree(sq->sqb_ptrs);
1295 static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
1301 /* The data transferred by NIX to memory consists of actual packet
1302 * plus additional data which has timestamp and/or EDSA/HIGIG2
1303 * headers if interface is configured in corresponding modes.
1304 * NIX transfers entire data using 6 segments/buffers and writes
1305 * a CQE_RX descriptor with those segment addresses. First segment
1306 * has additional data prepended to packet. Also software reserves a
1307 * headroom of 128 bytes and sizeof(struct skb_shared_info) in
1308 * each segment. Hence the total size of memory needed
1309 * to receive a packet with 'mtu' is:
1310 * frame size = mtu + additional data;
1311 * memory = frame_size + (headroom + struct skb_shared_info size) * 6;
1312 * each receive buffer size = memory / 6;
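* For example (illustrative numbers only, assuming roughly 320 bytes for
* struct skb_shared_info): with mtu = 1500 and ~30 bytes of additional
* data, frame_size is ~1530; memory is ~1530 + (128 + 320) * 6 = ~4218,
* so each receive buffer is ~703 bytes, rounded up to 2048 below.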
1314 frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
1315 total_size = frame_size + (OTX2_HEAD_ROOM +
1316 OTX2_DATA_ALIGN(sizeof(struct skb_shared_info))) * 6;
1317 rbuf_size = total_size / 6;
1319 return ALIGN(rbuf_size, 2048);
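/* Attach-time HW setup: configure the NPA and NIX LFs, enable
 * backpressure, initialize the auras/pools backing RQs and SQs, allocate
 * and program TX schedulers and the NIX queue contexts; on any failure,
 * tear down whatever was set up so far.
 */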
1322 static int otx2_init_hw_resources(struct otx2_nic *pf)
1324 struct nix_lf_free_req *free_req;
1325 struct mbox *mbox = &pf->mbox;
1326 struct otx2_hw *hw = &pf->hw;
1327 struct msg_req *req;
1330 /* Set required NPA LF's pool counts
1331 * Auras and Pools are used in a 1:1 mapping,
1332 * so, aura count = pool count.
1334 hw->rqpool_cnt = hw->rx_queues;
1335 hw->sqpool_cnt = hw->tx_queues;
1336 hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
1338 pf->max_frs = pf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
1340 pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
1342 mutex_lock(&mbox->lock);
1344 err = otx2_config_npa(pf);
1349 err = otx2_config_nix(pf);
1351 goto err_free_npa_lf;
1353 /* Enable backpressure */
1354 otx2_nix_config_bp(pf, true);
1356 /* Init Auras and pools used by NIX RQ, for free buffer ptrs */
1357 err = otx2_rq_aura_pool_init(pf);
1359 mutex_unlock(&mbox->lock);
1360 goto err_free_nix_lf;
1362 /* Init Auras and pools used by NIX SQ, for queueing SQEs */
1363 err = otx2_sq_aura_pool_init(pf);
1365 mutex_unlock(&mbox->lock);
1366 goto err_free_rq_ptrs;
1369 err = otx2_txsch_alloc(pf);
1371 mutex_unlock(&mbox->lock);
1372 goto err_free_sq_ptrs;
1375 err = otx2_config_nix_queues(pf);
1377 mutex_unlock(&mbox->lock);
1378 goto err_free_txsch;
1380 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1381 err = otx2_txschq_config(pf, lvl);
1383 mutex_unlock(&mbox->lock);
1384 goto err_free_nix_queues;
1387 mutex_unlock(&mbox->lock);
1390 err_free_nix_queues:
1391 otx2_free_sq_res(pf);
1392 otx2_free_cq_res(pf);
1393 otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
1395 if (otx2_txschq_stop(pf))
1396 dev_err(pf->dev, "%s failed to stop TX schedulers\n", __func__);
1398 otx2_sq_free_sqbs(pf);
1400 otx2_free_aura_ptr(pf, AURA_NIX_RQ);
1401 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
1402 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
1403 otx2_aura_pool_free(pf);
1405 mutex_lock(&mbox->lock);
1406 free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
1408 free_req->flags = NIX_LF_DISABLE_FLOWS;
1409 if (otx2_sync_mbox_msg(mbox))
1410 dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
1414 req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
1416 if (otx2_sync_mbox_msg(mbox))
1417 dev_err(pf->dev, "%s failed to free npalf\n", __func__);
1420 mutex_unlock(&mbox->lock);
1424 static void otx2_free_hw_resources(struct otx2_nic *pf)
1426 struct otx2_qset *qset = &pf->qset;
1427 struct nix_lf_free_req *free_req;
1428 struct mbox *mbox = &pf->mbox;
1429 struct otx2_cq_queue *cq;
1430 struct msg_req *req;
1433 /* Ensure all SQEs are processed */
1436 /* Stop transmission */
1437 err = otx2_txschq_stop(pf);
1439 dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
1441 mutex_lock(&mbox->lock);
1442 /* Disable backpressure */
1443 if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
1444 otx2_nix_config_bp(pf, false);
1445 mutex_unlock(&mbox->lock);
1448 otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
1450 /* Dequeue all CQEs */
1451 for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
1452 cq = &qset->cq[qidx];
1453 if (cq->cq_type == CQ_RX)
1454 otx2_cleanup_rx_cqes(pf, cq);
1456 otx2_cleanup_tx_cqes(pf, cq);
1459 otx2_free_sq_res(pf);
1461 /* Free RQ buffer pointers */
1462 otx2_free_aura_ptr(pf, AURA_NIX_RQ);
1464 otx2_free_cq_res(pf);
1466 /* Free all ingress bandwidth profiles allocated */
1467 cn10k_free_all_ipolicers(pf);
1469 mutex_lock(&mbox->lock);
1471 free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
1473 free_req->flags = NIX_LF_DISABLE_FLOWS;
1474 if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN))
1475 free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG;
1476 if (otx2_sync_mbox_msg(mbox))
1477 dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
1479 mutex_unlock(&mbox->lock);
1481 /* Disable NPA Pool and Aura hw context */
1482 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
1483 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
1484 otx2_aura_pool_free(pf);
1486 mutex_lock(&mbox->lock);
1488 req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
1490 if (otx2_sync_mbox_msg(mbox))
1491 dev_err(pf->dev, "%s failed to free npalf\n", __func__);
1493 mutex_unlock(&mbox->lock);
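/* ndo_open: allocate per-queue software state, bring up HW resources,
 * register NAPI contexts and the QINT/CINT interrupt handlers, restore
 * timestamping/VLAN/pause settings and finally enable Rx/Tx.
 */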
1496 int otx2_open(struct net_device *netdev)
1498 struct otx2_nic *pf = netdev_priv(netdev);
1499 struct otx2_cq_poll *cq_poll = NULL;
1500 struct otx2_qset *qset = &pf->qset;
1501 int err = 0, qidx, vec;
1504 netif_carrier_off(netdev);
1506 pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues;
1507 /* RQs and SQs are mapped to different CQs,
1508 * so find out the max CQ IRQs (i.e. CINTs) needed.
1510 pf->hw.cint_cnt = max(pf->hw.rx_queues, pf->hw.tx_queues);
1511 qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
1516 qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256);
1518 qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);
1521 qset->cq = kcalloc(pf->qset.cq_cnt,
1522 sizeof(struct otx2_cq_queue), GFP_KERNEL);
1526 qset->sq = kcalloc(pf->hw.tx_queues,
1527 sizeof(struct otx2_snd_queue), GFP_KERNEL);
1531 qset->rq = kcalloc(pf->hw.rx_queues,
1532 sizeof(struct otx2_rcv_queue), GFP_KERNEL);
1536 err = otx2_init_hw_resources(pf);
1540 /* Register NAPI handler */
1541 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1542 cq_poll = &qset->napi[qidx];
1543 cq_poll->cint_idx = qidx;
1544 /* RQ0 & SQ0 are mapped to CINT0 and so on..
1545 * 'cq_ids[0]' points to RQ's CQ and
1546 * 'cq_ids[1]' points to SQ's CQ and
1548 cq_poll->cq_ids[CQ_RX] =
1549 (qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
1550 cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
1551 qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
1552 cq_poll->dev = (void *)pf;
1553 netif_napi_add(netdev, &cq_poll->napi,
1554 otx2_napi_handler, NAPI_POLL_WEIGHT);
1555 napi_enable(&cq_poll->napi);
1558 /* Set maximum frame size allowed in HW */
1559 err = otx2_hw_set_mtu(pf, netdev->mtu);
1561 goto err_disable_napi;
1563 /* Set up segmentation algorithms; if that fails, clear the offload capability */
1564 otx2_setup_segmentation(pf);
1566 /* Initialize RSS */
1567 err = otx2_rss_init(pf);
1569 goto err_disable_napi;
1571 /* Register Queue IRQ handlers */
1572 vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START;
1573 irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
1575 snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name);
1577 err = request_irq(pci_irq_vector(pf->pdev, vec),
1578 otx2_q_intr_handler, 0, irq_name, pf);
1581 "RVUPF%d: IRQ registration failed for QERR\n",
1582 rvu_get_pf(pf->pcifunc));
1583 goto err_disable_napi;
1586 /* Enable QINT IRQ */
1587 otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0));
1589 /* Register CQ IRQ handlers */
1590 vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
1591 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1592 irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
1594 snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name,
1597 err = request_irq(pci_irq_vector(pf->pdev, vec),
1598 otx2_cq_intr_handler, 0, irq_name,
1602 "RVUPF%d: IRQ registration failed for CQ%d\n",
1603 rvu_get_pf(pf->pcifunc), qidx);
1604 goto err_free_cints;
1608 otx2_config_irq_coalescing(pf, qidx);
1611 otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
1612 otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
1615 otx2_set_cints_affinity(pf);
1617 if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
1618 otx2_enable_rxvlan(pf, true);
1620 /* When reinitializing, enable timestamping if it was enabled before */
1621 if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
1622 pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
1623 otx2_config_hw_tx_tstamp(pf, true);
1625 if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) {
1626 pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
1627 otx2_config_hw_rx_tstamp(pf, true);
1630 pf->flags &= ~OTX2_FLAG_INTF_DOWN;
1631 /* 'intf_down' may be checked on any cpu */
1634 /* we have already received link status notification */
1635 if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
1636 otx2_handle_link_event(pf);
1638 /* Restore pause frame settings */
1639 otx2_config_pause_frm(pf);
1641 /* Install DMAC Filters */
1642 if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
1643 otx2_dmacflt_reinstall_flows(pf);
1645 err = otx2_rxtx_enable(pf, true);
1647 goto err_tx_stop_queues;
1652 netif_tx_stop_all_queues(netdev);
1653 netif_carrier_off(netdev);
1654 pf->flags |= OTX2_FLAG_INTF_DOWN;
1656 otx2_free_cints(pf, qidx);
1657 vec = pci_irq_vector(pf->pdev,
1658 pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
1659 otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
1660 synchronize_irq(vec);
1663 otx2_disable_napi(pf);
1664 otx2_free_hw_resources(pf);
1672 EXPORT_SYMBOL(otx2_open);
1674 int otx2_stop(struct net_device *netdev)
1676 struct otx2_nic *pf = netdev_priv(netdev);
1677 struct otx2_cq_poll *cq_poll = NULL;
1678 struct otx2_qset *qset = &pf->qset;
1679 struct otx2_rss_info *rss;
1682 /* If the DOWN flag is set, resources are already freed */
1683 if (pf->flags & OTX2_FLAG_INTF_DOWN)
1686 netif_carrier_off(netdev);
1687 netif_tx_stop_all_queues(netdev);
1689 pf->flags |= OTX2_FLAG_INTF_DOWN;
1690 /* 'intf_down' may be checked on any cpu */
1693 /* First stop packet Rx/Tx */
1694 otx2_rxtx_enable(pf, false);
1696 /* Clear RSS enable flag */
1697 rss = &pf->hw.rss_info;
1698 rss->enable = false;
1700 /* Cleanup Queue IRQ */
1701 vec = pci_irq_vector(pf->pdev,
1702 pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
1703 otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
1704 synchronize_irq(vec);
1707 /* Cleanup CQ NAPI and IRQ */
1708 vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
1709 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1710 /* Disable interrupt */
1711 otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
1713 synchronize_irq(pci_irq_vector(pf->pdev, vec));
1715 cq_poll = &qset->napi[qidx];
1716 napi_synchronize(&cq_poll->napi);
1720 netif_tx_disable(netdev);
1722 otx2_free_hw_resources(pf);
1723 otx2_free_cints(pf, pf->hw.cint_cnt);
1724 otx2_disable_napi(pf);
1726 for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
1727 netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
1729 for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
1730 cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
1731 devm_kfree(pf->dev, pf->refill_wrk);
1737 /* Do not clear RQ/SQ ringsize settings */
1738 memset((void *)qset + offsetof(struct otx2_qset, sqe_cnt), 0,
1739 sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt));
1742 EXPORT_SYMBOL(otx2_stop);
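/* ndo_start_xmit: drop runt/oversized frames, append the skb to the
 * mapped send queue and, if the SQ runs out of SQBs, stop the queue and
 * return NETDEV_TX_BUSY (re-checking once in case SQBs were freed).
 */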
1744 static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
1746 struct otx2_nic *pf = netdev_priv(netdev);
1747 int qidx = skb_get_queue_mapping(skb);
1748 struct otx2_snd_queue *sq;
1749 struct netdev_queue *txq;
1751 /* Check for minimum and maximum packet length */
1752 if (skb->len <= ETH_HLEN ||
1753 (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) {
1755 return NETDEV_TX_OK;
1758 sq = &pf->qset.sq[qidx];
1759 txq = netdev_get_tx_queue(netdev, qidx);
1761 if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
1762 netif_tx_stop_queue(txq);
1764 /* Check again, in case SQBs got freed up */
1766 if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
1768 netif_tx_wake_queue(txq);
1770 return NETDEV_TX_BUSY;
1773 return NETDEV_TX_OK;
1776 static netdev_features_t otx2_fix_features(struct net_device *dev,
1777 netdev_features_t features)
1779 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1780 features |= NETIF_F_HW_VLAN_STAG_RX;
1782 features &= ~NETIF_F_HW_VLAN_STAG_RX;
1787 static void otx2_set_rx_mode(struct net_device *netdev)
1789 struct otx2_nic *pf = netdev_priv(netdev);
1791 queue_work(pf->otx2_wq, &pf->rx_mode_work);
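/* Deferred rx-mode work: sync unicast MAC filters into MCAM (when
 * unicast filtering is supported) and send a nix_set_rx_mode mailbox
 * request with the UCAST/PROMISC/ALLMULTI bits derived from the netdev
 * flags.
 */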
1794 static void otx2_do_set_rx_mode(struct work_struct *work)
1796 struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
1797 struct net_device *netdev = pf->netdev;
1798 struct nix_rx_mode *req;
1799 bool promisc = false;
1801 if (!(netdev->flags & IFF_UP))
1804 if ((netdev->flags & IFF_PROMISC) ||
1805 (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
1809 /* Write unicast address to mcam entries or del from mcam */
1810 if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
1811 __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
1813 mutex_lock(&pf->mbox.lock);
1814 req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
1816 mutex_unlock(&pf->mbox.lock);
1820 req->mode = NIX_RX_MODE_UCAST;
1823 req->mode |= NIX_RX_MODE_PROMISC;
1824 if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
1825 req->mode |= NIX_RX_MODE_ALLMULTI;
1827 req->mode |= NIX_RX_MODE_USE_MCE;
1829 otx2_sync_mbox_msg(&pf->mbox);
1830 mutex_unlock(&pf->mbox.lock);
1833 static int otx2_set_features(struct net_device *netdev,
1834 netdev_features_t features)
1836 netdev_features_t changed = features ^ netdev->features;
1837 bool ntuple = !!(features & NETIF_F_NTUPLE);
1838 struct otx2_nic *pf = netdev_priv(netdev);
1839 bool tc = !!(features & NETIF_F_HW_TC);
1841 if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
1842 return otx2_cgx_config_loopback(pf,
1843 features & NETIF_F_LOOPBACK);
1845 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev))
1846 return otx2_enable_rxvlan(pf,
1847 features & NETIF_F_HW_VLAN_CTAG_RX);
1849 if ((changed & NETIF_F_NTUPLE) && !ntuple)
1850 otx2_destroy_ntuple_flows(pf);
1852 if ((changed & NETIF_F_NTUPLE) && ntuple) {
1853 if (!pf->flow_cfg->max_flows) {
1855 "Can't enable NTUPLE, MCAM entries not allocated\n");
1860 if ((changed & NETIF_F_HW_TC) && tc) {
1861 if (!pf->flow_cfg->max_flows) {
1863 "Can't enable TC, MCAM entries not allocated\n");
1868 if ((changed & NETIF_F_HW_TC) && !tc &&
1869 pf->flow_cfg && pf->flow_cfg->nr_flows) {
1870 netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
1874 if ((changed & NETIF_F_NTUPLE) && ntuple &&
1875 (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
1877 "Can't enable NTUPLE when TC is active, disable TC and retry\n");
1881 if ((changed & NETIF_F_HW_TC) && tc &&
1882 (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
1884 "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
1891 static void otx2_reset_task(struct work_struct *work)
1893 struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task);
1895 if (!netif_running(pf->netdev))
1899 otx2_stop(pf->netdev);
1901 otx2_open(pf->netdev);
1902 netif_trans_update(pf->netdev);
1906 static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable)
1908 struct msg_req *req;
1911 if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable)
1914 mutex_lock(&pfvf->mbox.lock);
1916 req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox);
1918 req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox);
1920 mutex_unlock(&pfvf->mbox.lock);
1924 err = otx2_sync_mbox_msg(&pfvf->mbox);
1926 mutex_unlock(&pfvf->mbox.lock);
1930 mutex_unlock(&pfvf->mbox.lock);
1932 pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED;
1934 pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
1938 static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
1940 struct msg_req *req;
1943 if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable)
1946 mutex_lock(&pfvf->mbox.lock);
1948 req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox);
1950 req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox);
1952 mutex_unlock(&pfvf->mbox.lock);
1956 err = otx2_sync_mbox_msg(&pfvf->mbox);
1958 mutex_unlock(&pfvf->mbox.lock);
1962 mutex_unlock(&pfvf->mbox.lock);
1964 pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED;
1966 pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
1970 static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
1972 struct otx2_nic *pfvf = netdev_priv(netdev);
1973 struct hwtstamp_config config;
1978 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1981 /* reserved for future extensions */
1985 switch (config.tx_type) {
1986 case HWTSTAMP_TX_OFF:
1987 otx2_config_hw_tx_tstamp(pfvf, false);
1989 case HWTSTAMP_TX_ON:
1990 otx2_config_hw_tx_tstamp(pfvf, true);
1996 switch (config.rx_filter) {
1997 case HWTSTAMP_FILTER_NONE:
1998 otx2_config_hw_rx_tstamp(pfvf, false);
2000 case HWTSTAMP_FILTER_ALL:
2001 case HWTSTAMP_FILTER_SOME:
2002 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2003 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2004 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2005 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2006 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2007 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2008 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2009 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2010 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2011 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2012 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2013 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2014 otx2_config_hw_rx_tstamp(pfvf, true);
2015 config.rx_filter = HWTSTAMP_FILTER_ALL;
2021 memcpy(&pfvf->tstamp, &config, sizeof(config));
2023 return copy_to_user(ifr->ifr_data, &config,
2024 sizeof(config)) ? -EFAULT : 0;
2027 static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
2029 struct otx2_nic *pfvf = netdev_priv(netdev);
2030 struct hwtstamp_config *cfg = &pfvf->tstamp;
2034 return otx2_config_hwtstamp(netdev, req);
2036 return copy_to_user(req->ifr_data, cfg,
2037 sizeof(*cfg)) ? -EFAULT : 0;
2043 static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
2045 struct npc_install_flow_req *req;
2048 mutex_lock(&pf->mbox.lock);
2049 req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
2055 ether_addr_copy(req->packet.dmac, mac);
2056 eth_broadcast_addr((u8 *)&req->mask.dmac);
2057 req->features = BIT_ULL(NPC_DMAC);
2058 req->channel = pf->hw.rx_chan_base;
2059 req->intf = NIX_INTF_RX;
2060 req->default_rule = 1;
2063 req->op = NIX_RX_ACTION_DEFAULT;
2065 err = otx2_sync_mbox_msg(&pf->mbox);
2067 mutex_unlock(&pf->mbox.lock);
2071 static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
2073 struct otx2_nic *pf = netdev_priv(netdev);
2074 struct pci_dev *pdev = pf->pdev;
2075 struct otx2_vf_config *config;
2078 if (!netif_running(netdev))
2081 if (vf >= pf->total_vfs)
2084 if (!is_valid_ether_addr(mac))
2087 config = &pf->vf_configs[vf];
2088 ether_addr_copy(config->mac, mac);
2090 ret = otx2_do_set_vf_mac(pf, vf, mac);
2092 dev_info(&pdev->dev,
2093 "Load/Reload VF driver\n");
2098 static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
2101 struct otx2_flow_config *flow_cfg = pf->flow_cfg;
2102 struct nix_vtag_config_rsp *vtag_rsp;
2103 struct npc_delete_flow_req *del_req;
2104 struct nix_vtag_config *vtag_req;
2105 struct npc_install_flow_req *req;
2106 struct otx2_vf_config *config;
2110 config = &pf->vf_configs[vf];
2112 if (!vlan && !config->vlan)
2115 mutex_lock(&pf->mbox.lock);
2117 /* free old tx vtag entry */
2119 vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
2124 vtag_req->cfg_type = 0;
2125 vtag_req->tx.free_vtag0 = 1;
2126 vtag_req->tx.vtag0_idx = config->tx_vtag_idx;
2128 err = otx2_sync_mbox_msg(&pf->mbox);
2133 if (!vlan && config->vlan) {
2135 del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
2140 idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
2142 flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
2143 err = otx2_sync_mbox_msg(&pf->mbox);
2148 del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
2153 idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
2155 flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
2156 err = otx2_sync_mbox_msg(&pf->mbox);
2162 req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
2168 idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
2169 req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
2170 req->packet.vlan_tci = htons(vlan);
2171 req->mask.vlan_tci = htons(VLAN_VID_MASK);
2172 /* af fills the destination mac addr */
2173 eth_broadcast_addr((u8 *)&req->mask.dmac);
2174 req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
2175 req->channel = pf->hw.rx_chan_base;
2176 req->intf = NIX_INTF_RX;
2178 req->op = NIX_RX_ACTION_DEFAULT;
2179 req->vtag0_valid = true;
2180 req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
2183 err = otx2_sync_mbox_msg(&pf->mbox);
2188 vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
2194 /* configure tx vtag params */
2195 vtag_req->vtag_size = VTAGSIZE_T4;
2196 vtag_req->cfg_type = 0; /* tx vlan cfg */
2197 vtag_req->tx.cfg_vtag0 = 1;
2198 vtag_req->tx.vtag0 = ((u64)ntohs(proto) << 16) | vlan;
2200 err = otx2_sync_mbox_msg(&pf->mbox);
2204 vtag_rsp = (struct nix_vtag_config_rsp *)otx2_mbox_get_rsp
2205 (&pf->mbox.mbox, 0, &vtag_req->hdr);
2206 if (IS_ERR(vtag_rsp)) {
2207 err = PTR_ERR(vtag_rsp);
2210 config->tx_vtag_idx = vtag_rsp->vtag0_idx;
2212 req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
2218 eth_zero_addr((u8 *)&req->mask.dmac);
2219 idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
2220 req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
2221 req->features = BIT_ULL(NPC_DMAC);
2222 req->channel = pf->hw.tx_chan_base;
2223 req->intf = NIX_INTF_TX;
2225 req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT;
2226 req->vtag0_def = vtag_rsp->vtag0_idx;
2227 req->vtag0_op = VTAG_INSERT;
2230 err = otx2_sync_mbox_msg(&pf->mbox);
2232 config->vlan = vlan;
2233 mutex_unlock(&pf->mbox.lock);
2237 static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
2240 struct otx2_nic *pf = netdev_priv(netdev);
2241 struct pci_dev *pdev = pf->pdev;
2243 if (!netif_running(netdev))
2246 if (vf >= pci_num_vf(pdev))
2249 /* qos is currently unsupported */
2250 if (vlan >= VLAN_N_VID || qos)
2253 if (proto != htons(ETH_P_8021Q))
2254 return -EPROTONOSUPPORT;
2256 if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT))
2259 return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto);
2262 static int otx2_get_vf_config(struct net_device *netdev, int vf,
2263 struct ifla_vf_info *ivi)
2265 struct otx2_nic *pf = netdev_priv(netdev);
2266 struct pci_dev *pdev = pf->pdev;
2267 struct otx2_vf_config *config;
2269 if (!netif_running(netdev))
2272 if (vf >= pci_num_vf(pdev))
2275 config = &pf->vf_configs[vf];
2277 ether_addr_copy(ivi->mac, config->mac);
2278 ivi->vlan = config->vlan;
2279 ivi->trusted = config->trusted;
2284 static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
2287 struct set_vf_perm *req;
2290 mutex_lock(&pf->mbox.lock);
2291 req = otx2_mbox_alloc_msg_set_vf_perm(&pf->mbox);
2297 /* Let AF reset VF permissions as sriov is disabled */
2298 if (req_perm == OTX2_RESET_VF_PERM) {
2299 req->flags |= RESET_VF_PERM;
2300 } else if (req_perm == OTX2_TRUSTED_VF) {
2301 if (pf->vf_configs[vf].trusted)
2302 req->flags |= VF_TRUSTED;
2306 rc = otx2_sync_mbox_msg(&pf->mbox);
2308 mutex_unlock(&pf->mbox.lock);
2312 static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
2315 struct otx2_nic *pf = netdev_priv(netdev);
2316 struct pci_dev *pdev = pf->pdev;
2319 if (vf >= pci_num_vf(pdev))
2322 if (pf->vf_configs[vf].trusted == enable)
2325 pf->vf_configs[vf].trusted = enable;
2326 rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);
2329 pf->vf_configs[vf].trusted = !enable;
2331 netdev_info(pf->netdev, "VF %d is %strusted\n",
2332 vf, enable ? "" : "not ");
2336 static const struct net_device_ops otx2_netdev_ops = {
2337 .ndo_open = otx2_open,
2338 .ndo_stop = otx2_stop,
2339 .ndo_start_xmit = otx2_xmit,
2340 .ndo_fix_features = otx2_fix_features,
2341 .ndo_set_mac_address = otx2_set_mac_address,
2342 .ndo_change_mtu = otx2_change_mtu,
2343 .ndo_set_rx_mode = otx2_set_rx_mode,
2344 .ndo_set_features = otx2_set_features,
2345 .ndo_tx_timeout = otx2_tx_timeout,
2346 .ndo_get_stats64 = otx2_get_stats64,
2347 .ndo_eth_ioctl = otx2_ioctl,
2348 .ndo_set_vf_mac = otx2_set_vf_mac,
2349 .ndo_set_vf_vlan = otx2_set_vf_vlan,
2350 .ndo_get_vf_config = otx2_get_vf_config,
2351 .ndo_setup_tc = otx2_setup_tc,
2352 .ndo_set_vf_trust = otx2_ndo_set_vf_trust,
2355 static int otx2_wq_init(struct otx2_nic *pf)
2357 pf->otx2_wq = create_singlethread_workqueue("otx2_wq");
2361 INIT_WORK(&pf->rx_mode_work, otx2_do_set_rx_mode);
2362 INIT_WORK(&pf->reset_task, otx2_reset_task);
2366 static int otx2_check_pf_usable(struct otx2_nic *nic)
2370 rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
2371 rev = (rev >> 12) & 0xFF;
2372 /* Check if AF has set up the revision for the RVUM block;
2373 * otherwise this driver's probe should be deferred
2374 * until the AF driver comes up.
2378 "AF is not initialized, deferring probe\n");
2379 return -EPROBE_DEFER;
2384 static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
2386 struct otx2_hw *hw = &pf->hw;
2389 /* NPA interrupts are not registered, so alloc only
2390 * up to the NIX vector offset.
2392 num_vec = hw->nix_msixoff;
2393 num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
2395 otx2_disable_mbox_intr(pf);
2396 pci_free_irq_vectors(hw->pdev);
2397 err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
2399 dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
2404 return otx2_register_mbox_intr(pf, false);
2407 static int otx2_sriov_vfcfg_init(struct otx2_nic *pf)
2411 pf->vf_configs = devm_kcalloc(pf->dev, pf->total_vfs,
2412 sizeof(struct otx2_vf_config),
2414 if (!pf->vf_configs)
2417 for (i = 0; i < pf->total_vfs; i++) {
2418 pf->vf_configs[i].pf = pf;
2419 pf->vf_configs[i].intf_down = true;
2420 pf->vf_configs[i].trusted = false;
2421 INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
2422 otx2_vf_link_event_task);
2428 static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
2432 if (!pf->vf_configs)
2435 for (i = 0; i < pf->total_vfs; i++) {
2436 cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
2437 otx2_set_vf_permissions(pf, i, OTX2_RESET_VF_PERM);
2441 static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2443 struct device *dev = &pdev->dev;
2444 struct net_device *netdev;
2445 struct otx2_nic *pf;
2450 err = pcim_enable_device(pdev);
2452 dev_err(dev, "Failed to enable PCI device\n");
2456 err = pci_request_regions(pdev, DRV_NAME);
2458 dev_err(dev, "PCI request regions failed 0x%x\n", err);
2462 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
2464 dev_err(dev, "DMA mask config failed, abort\n");
2465 goto err_release_regions;
2468 pci_set_master(pdev);
2470 /* Set number of queues */
2471 qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);
2473 netdev = alloc_etherdev_mqs(sizeof(*pf), qcount, qcount);
2476 goto err_release_regions;
2479 pci_set_drvdata(pdev, netdev);
2480 SET_NETDEV_DEV(netdev, &pdev->dev);
2481 pf = netdev_priv(netdev);
2482 pf->netdev = netdev;
2485 pf->total_vfs = pci_sriov_get_totalvfs(pdev);
2486 pf->flags |= OTX2_FLAG_INTF_DOWN;
	hw = &pf->hw;
	hw->pdev = pdev;
	hw->rx_queues = qcount;
	hw->tx_queues = qcount;
	hw->max_queues = qcount;

	num_vec = pci_msix_vec_count(pdev);
	hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
					  GFP_KERNEL);
	if (!hw->irq_name) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
					 sizeof(cpumask_var_t), GFP_KERNEL);
	if (!hw->affinity_mask) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	/* Map CSRs */
	pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!pf->reg_base) {
		dev_err(dev, "Unable to map physical function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = otx2_check_pf_usable(pf);
	if (err)
		goto err_free_netdev;

	err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
				    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
			__func__, RVU_PF_INT_VEC_CNT);
		goto err_free_netdev;
	}

	otx2_setup_dev_hw_settings(pf);
	/* Init PF <=> AF mailbox stuff */
	err = otx2_pfaf_mbox_init(pf);
	if (err)
		goto err_free_irq_vectors;

	/* Register mailbox interrupt */
	err = otx2_register_mbox_intr(pf, true);
	if (err)
		goto err_mbox_destroy;

	/* Request AF to attach NPA and NIX LFs to this PF.
	 * NIX and NPA LFs are needed for this PF to function as a NIC.
	 */
	err = otx2_attach_npa_nix(pf);
	if (err)
		goto err_disable_mbox_intr;

	err = otx2_realloc_msix_vectors(pf);
	if (err)
		goto err_detach_rsrc;

	err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
	if (err)
		goto err_detach_rsrc;

	err = cn10k_lmtst_init(pf);
	if (err)
		goto err_detach_rsrc;

	/* Assign default mac address */
	otx2_get_mac_from_af(netdev);

	/* Don't check for error. Proceed without ptp */
	otx2_ptp_init(pf);

	/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
	 * HW allocates buffer pointers from the stack and uses them for DMA'ing
	 * ingress packets. In some scenarios HW can free back allocated buffer
	 * pointers to the pool. This makes it impossible for SW to maintain a
	 * parallel list where physical addresses of buffer pointers (IOVAs)
	 * given to HW can be saved for later reference.
	 *
	 * So the only way to convert an Rx packet's buffer address is to use
	 * IOMMU's iova_to_phys() handler, which translates the address by
	 * walking through the translation tables.
	 */
	pf->iommu_domain = iommu_get_domain_for_dev(dev);
	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
			       NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
			       NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
			       NETIF_F_GSO_UDP_L4);
	netdev->features |= netdev->hw_features;

	err = otx2_mcam_flow_init(pf);
	if (err)
		goto err_ptp_destroy;

	if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
		netdev->hw_features |= NETIF_F_NTUPLE;

	if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)
		netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Support TSO on tag interface */
	netdev->vlan_features |= netdev->features;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX;
	if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
				       NETIF_F_HW_VLAN_STAG_RX;
	netdev->features |= netdev->hw_features;

	/* HW supports tc offload but it is mutually exclusive with n-tuple filters */
	if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)
		netdev->hw_features |= NETIF_F_HW_TC;

	netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;

	netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;

	netdev->netdev_ops = &otx2_netdev_ops;

	netdev->min_mtu = OTX2_MIN_MTU;
	netdev->max_mtu = otx2_get_max_mtu(pf);
	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_del_mcam_entries;
	}

	err = otx2_wq_init(pf);
	if (err)
		goto err_unreg_netdev;

	otx2_set_ethtool_ops(netdev);

	err = otx2_init_tc(pf);
	if (err)
		goto err_mcam_flow_del;

	err = otx2_register_dl(pf);
	if (err)
		goto err_mcam_flow_del;

	/* Initialize SR-IOV resources */
	err = otx2_sriov_vfcfg_init(pf);
	if (err)
		goto err_pf_sriov_init;

	/* Enable link notifications */
	otx2_cgx_config_linkevents(pf, true);

	/* Enable pause frames by default */
	pf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
	pf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;

	return 0;
err_pf_sriov_init:
	otx2_shutdown_tc(pf);
err_mcam_flow_del:
	otx2_mcam_flow_del(pf);
err_unreg_netdev:
	unregister_netdev(netdev);
err_del_mcam_entries:
	otx2_mcam_flow_del(pf);
err_ptp_destroy:
	otx2_ptp_destroy(pf);
err_detach_rsrc:
	if (pf->hw.lmt_info)
		free_percpu(pf->hw.lmt_info);
	if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
		qmem_free(pf->dev, pf->dync_lmt);
	otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
	otx2_disable_mbox_intr(pf);
err_mbox_destroy:
	otx2_pfaf_mbox_destroy(pf);
err_free_irq_vectors:
	pci_free_irq_vectors(hw->pdev);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
	return err;
}
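
/* Deferred per-VF work: forward the PF's last seen link state (pf->linfo)
 * to one VF as a CGX_LINK_EVENT notification over the PF <=> VF mailbox.
 */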
static void otx2_vf_link_event_task(struct work_struct *work)
{
	struct otx2_vf_config *config;
	struct cgx_link_info_msg *req;
	struct mbox_msghdr *msghdr;
	struct otx2_nic *pf;
	int vf_idx;

	config = container_of(work, struct otx2_vf_config,
			      link_event_work.work);
	vf_idx = config - config->pf->vf_configs;
	pf = config->pf;

	msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
					 sizeof(*req), sizeof(struct msg_rsp));
	if (!msghdr) {
		dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
		return;
	}

	req = (struct cgx_link_info_msg *)msghdr;
	req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
	req->hdr.sig = OTX2_MBOX_REQ_SIG;
	memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));

	otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
}
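
/* SR-IOV enable: bring up the PF <=> VF mailboxes and FLR/ME interrupt
 * handling for 'numvfs' VFs before asking the PCI core to create them.
 */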
static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf = netdev_priv(netdev);
	int ret;

	/* Init PF <=> VF mailbox stuff */
	ret = otx2_pfvf_mbox_init(pf, numvfs);
	if (ret)
		goto free_mbox;

	ret = otx2_register_pfvf_mbox_intr(pf, numvfs);
	if (ret)
		goto free_mbox;

	ret = otx2_pf_flr_init(pf, numvfs);
	if (ret)
		goto free_intr;

	ret = otx2_register_flr_me_intr(pf, numvfs);
	if (ret)
		goto free_flr;

	ret = pci_enable_sriov(pdev, numvfs);
	if (ret)
		goto free_flr_intr;

	return numvfs;
free_flr_intr:
	otx2_disable_flr_me_intr(pf);
free_flr:
	otx2_flr_wq_destroy(pf);
free_intr:
	otx2_disable_pfvf_mbox_intr(pf, numvfs);
free_mbox:
	otx2_pfvf_mbox_destroy(pf);
	return ret;
}
static int otx2_sriov_disable(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf = netdev_priv(netdev);
	int numvfs = pci_num_vf(pdev);

	if (!numvfs)
		return 0;

	pci_disable_sriov(pdev);

	otx2_disable_flr_me_intr(pf);
	otx2_flr_wq_destroy(pf);
	otx2_disable_pfvf_mbox_intr(pf, numvfs);
	otx2_pfvf_mbox_destroy(pf);

	return 0;
}
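
/* Called by the PCI core when VFs are requested via sysfs;
 * numvfs == 0 means disable SR-IOV.
 */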
static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
{
	if (numvfs == 0)
		return otx2_sriov_disable(pdev);

	return otx2_sriov_enable(pdev, numvfs);
}
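
/* Device teardown: disable timestamping and SR-IOV, unregister the netdev
 * and release the resources acquired in otx2_probe(), in reverse order.
 */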
static void otx2_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf;

	if (!netdev)
		return;

	pf = netdev_priv(netdev);

	pf->flags |= OTX2_FLAG_PF_SHUTDOWN;

	if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
		otx2_config_hw_tx_tstamp(pf, false);
	if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
		otx2_config_hw_rx_tstamp(pf, false);

	cancel_work_sync(&pf->reset_task);
	/* Disable link notifications */
	otx2_cgx_config_linkevents(pf, false);

	otx2_unregister_dl(pf);
	unregister_netdev(netdev);
	otx2_sriov_disable(pf->pdev);
	otx2_sriov_vfcfg_cleanup(pf);

	destroy_workqueue(pf->otx2_wq);

	otx2_ptp_destroy(pf);
	otx2_mcam_flow_del(pf);
	otx2_shutdown_tc(pf);
	otx2_detach_resources(&pf->mbox);
	if (pf->hw.lmt_info)
		free_percpu(pf->hw.lmt_info);
	if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
		qmem_free(pf->dev, pf->dync_lmt);
	otx2_disable_mbox_intr(pf);
	otx2_pfaf_mbox_destroy(pf);
	pci_free_irq_vectors(pf->pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	pci_release_regions(pdev);
}
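
/* .shutdown is wired to otx2_remove() as well, so the same teardown runs at
 * system shutdown/reboot as on driver unbind.
 */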
static struct pci_driver otx2_pf_driver = {
	.name = DRV_NAME,
	.id_table = otx2_pf_id_table,
	.probe = otx2_probe,
	.shutdown = otx2_remove,
	.remove = otx2_remove,
	.sriov_configure = otx2_sriov_configure
};
static int __init otx2_rvupf_init_module(void)
{
	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	return pci_register_driver(&otx2_pf_driver);
}
static void __exit otx2_rvupf_cleanup_module(void)
{
	pci_unregister_driver(&otx2_pf_driver);
}

module_init(otx2_rvupf_init_module);
module_exit(otx2_rvupf_cleanup_module);