/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef OTX2_COMMON_H
#define OTX2_COMMON_H
#include <linux/pci.h>
#include <linux/iommu.h>

#include <mbox.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF		0xA063

#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF		0xB200
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM			2
#define PCI_MBOX_BAR_NUM			4
enum aura_mapped_qtypes {
	AURA_NIX_RQ,
	AURA_NIX_SQ,
};
/* NIX LF interrupts range */
#define NIX_LF_QINT_VEC_START			0x00
#define NIX_LF_CINT_VEC_START			0x40
#define NIX_LF_GINT_VEC				0x80
#define NIX_LF_ERR_VEC				0x81
#define NIX_LF_POISON_VEC			0x82
/* RSS configuration */
struct otx2_rss_info {
	u8 enable;
	u32 flowkey_cfg;
	u16 rss_size;
	u8  ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
#define RSS_HASH_KEY_SIZE	44 /* 352 bit key */
	u8  key[RSS_HASH_KEY_SIZE];
};
/* NIX (or NPC) RX errors */
enum otx2_errlvl {
	NPC_ERRLVL_RE,
	NPC_ERRLVL_NIX = 0x0F,
};
enum otx2_errcodes_re {
	/* NPC_ERRLVL_RE errcodes */
	ERRCODE_FCS = 0x7,
	ERRCODE_FCS_RCV = 0x8,
	ERRCODE_UNDERSIZE = 0x10,
	ERRCODE_OVERSIZE = 0x11,
	ERRCODE_OL2_LEN_MISMATCH = 0x12,
	/* NPC_ERRLVL_NIX errcodes */
	ERRCODE_OL3_LEN = 0x10,
	ERRCODE_OL4_LEN = 0x11,
	ERRCODE_OL4_CSUM = 0x12,
	ERRCODE_IL3_LEN = 0x20,
	ERRCODE_IL4_LEN = 0x21,
	ERRCODE_IL4_CSUM = 0x22,
};
/* NIX RX statistics */
enum nix_stat_lf_rx {
	RX_OCTS		= 0x0,
	RX_UCAST	= 0x1,
	RX_BCAST	= 0x2,
	RX_MCAST	= 0x3,
	RX_DROP		= 0x4,
	RX_DROP_OCTS	= 0x5,
	RX_FCS		= 0x6,
	RX_ERR		= 0x7,
	RX_DRP_BCAST	= 0x8,
	RX_DRP_MCAST	= 0x9,
	RX_DRP_L3BCAST	= 0xa,
	RX_DRP_L3MCAST	= 0xb,
};
struct otx2_dev_stats {
	u64 rx_bytes;
	u64 rx_frames;
	u64 rx_ucast_frames;
	u64 rx_bcast_frames;
	u64 rx_mcast_frames;
	u64 rx_drops;

	u64 tx_bytes;
	u64 tx_frames;
	u64 tx_ucast_frames;
	u64 tx_bcast_frames;
	u64 tx_mcast_frames;
	u64 tx_drops;
};
/* Driver counted stats */
struct otx2_drv_stats {
	atomic_t rx_fcs_errs;
	atomic_t rx_oversize_errs;
	atomic_t rx_undersize_errs;
	atomic_t rx_csum_errs;
	atomic_t rx_len_errs;
	atomic_t rx_other_errs;
};
struct mbox {
	struct otx2_mbox	mbox;
	struct work_struct	mbox_wrk;
	struct otx2_mbox	mbox_up;
	struct work_struct	mbox_up_wrk;
	struct otx2_nic		*pfvf;
	void			*bbuf_base; /* Bounce buffer for mbox memory */
	struct mutex		lock;	/* serialize mailbox access */
	int			num_msgs; /* mbox number of messages */
	int			up_num_msgs; /* mbox_up number of messages */
};
struct otx2_hw {
	struct pci_dev		*pdev;
	struct otx2_rss_info	rss_info;
	u16			max_queues;
	u16			rqpool_cnt;
	u16			sqpool_cnt;

	/* NPA */
	u32			stack_pg_ptrs;  /* No of ptrs per stack page */
	u32			stack_pg_bytes; /* Size of stack page */

	/* NIX */
	u16			txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];

	/* HW settings, coalescing etc */
	u16			cq_qcount_wait;
	u16			cq_ecount_wait;
	u16			rq_skid;
	u8			cq_time_wait;

	/* For TSO segmentation */
	u8			hw_tso;

	/* MSI-X */
	u8			cint_cnt;	/* CQ interrupt count */
	u16			npa_msixoff;	/* Offset of NPA vectors */
	u16			nix_msixoff;	/* Offset of NIX vectors */
	cpumask_var_t		*affinity_mask;

	/* Stats */
	struct otx2_dev_stats	dev_stats;
	struct otx2_drv_stats	drv_stats;
	u64			cgx_rx_stats[CGX_RX_STATS_COUNT];
	u64			cgx_tx_stats[CGX_TX_STATS_COUNT];
};
struct refill_work {
	struct delayed_work pool_refill_work;
	struct otx2_nic *pf;
};
struct otx2_nic {
	void __iomem		*reg_base;
	struct net_device	*netdev;
	u16			rbsize; /* Receive buffer size */

#define OTX2_FLAG_INTF_DOWN	BIT_ULL(2)
	u64			flags;

	struct otx2_qset	qset;
	struct otx2_hw		hw;
	struct pci_dev		*pdev;
	struct device		*dev;

	/* Mbox */
	struct mbox		mbox;
	struct workqueue_struct	*mbox_wq;

	u16			pcifunc; /* RVU PF_FUNC */
	struct cgx_link_user_info linfo;

	struct work_struct	reset_task;
	struct refill_work	*refill_wrk;

	/* Block address of NIX, either BLKADDR_NIX0 or BLKADDR_NIX1 */
	int			nix_blkaddr;
};
static inline bool is_96xx_A0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x00) &&
	       (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline bool is_96xx_B0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x01) &&
	       (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;

	pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
	pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
	pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;

	hw->hw_tso = true;

	if (is_96xx_A0(pfvf->pdev)) {
		hw->hw_tso = false;

		/* Time based irq coalescing is not supported */
		pfvf->hw.cq_qcount_wait = 0x0;

		/* Due to a HW issue these silicon versions require a
		 * minimum of 600 unused CQEs to avoid CQ overflow.
		 */
		pfvf->hw.rq_skid = 600;
		pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
	}
}
/* Register read/write APIs */
static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
{
	u64 blkaddr;

	switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
	case BLKTYPE_NIX:
		blkaddr = nic->nix_blkaddr;
		break;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		break;
	default:
		blkaddr = BLKADDR_RVUM;
		break;
	}

	offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
	offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);

	return nic->reg_base + offset;
}
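
/* Worked example (illustrative, not part of the driver): LF register
 * offsets carry a block *type* in their RVU_FUNC_BLKADDR field. Assuming
 * the otx2_reg.h values RVU_FUNC_BLKADDR_SHIFT = 20 and
 * RVU_FUNC_BLKADDR_MASK = 0x1F, an NPA offset such as
 * NPA_LF_AURA_OP_FREE0 satisfies
 *
 *	((offset >> 20) & 0x1F) == BLKTYPE_NPA
 *
 * so otx2_get_regaddr() rewrites that field to the concrete block
 * address (BLKADDR_NPA) before adding the offset to reg_base.
 */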
static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	writeq(val, addr);
}

static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	return readq(addr);
}
/* Mbox bounce buffer APIs */
static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
{
	struct otx2_mbox *otx2_mbox;
	struct otx2_mbox_dev *mdev;

	mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
	if (!mbox->bbuf_base)
		return -ENOMEM;

	/* Overwrite mbox mbase to point to bounce buffer, so that PF/VF
	 * prepare all mbox messages in bounce buffer instead of directly
	 * in hw mbox memory.
	 */
	otx2_mbox = &mbox->mbox;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;

	otx2_mbox = &mbox->mbox_up;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;

	return 0;
}
static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
{
	u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *hdr;
	u64 msg_size;

	if (mdev->mbase == hw_mbase)
		return;

	hdr = hw_mbase + mbox->rx_start;
	msg_size = hdr->msg_size;

	if (msg_size > mbox->rx_size - msgs_offset)
		msg_size = mbox->rx_size - msgs_offset;

	/* Copy mbox messages from mbox memory to bounce buffer */
	memcpy(mdev->mbase + mbox->rx_start,
	       hw_mbase + mbox->rx_start, msg_size + msgs_offset);
}
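
/* Usage sketch (hedged; names mirror the PF probe and mbox-IRQ paths
 * rather than quoting them): the bounce buffer is set up once after mbox
 * init and re-synced on every incoming-message interrupt before parsing:
 *
 *	err = otx2_mbox_bbuf_init(&pf->mbox, pf->pdev);
 *	if (err)
 *		return err;
 *	...
 *	otx2_sync_mbox_bbuf(&pf->mbox.mbox, 0);	// in the mbox IRQ handler
 *
 * so message headers are always parsed from coherent kernel memory
 * instead of the shared HW mailbox region.
 */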
static inline void otx2_mbox_lock_init(struct mbox *mbox)
{
	mutex_init(&mbox->lock);
}

static inline void otx2_mbox_lock(struct mbox *mbox)
{
	mutex_lock(&mbox->lock);
}

static inline void otx2_mbox_unlock(struct mbox *mbox)
{
	mutex_unlock(&mbox->lock);
}
/* Since arm64 lacks an API for 128-bit IO memory accesses,
 * implement the required operations in place.
 */
#if defined(CONFIG_ARM64)
static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
{
	__asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
			 ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}

static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
{
	u64 result;

	__asm__ volatile(".cpu	generic+lse\n"
			 "ldadd %x[i], %x[r], [%[b]]"
			 : [r]"=r"(result), "+m"(*ptr)
			 : [i]"r"(incr), [b]"r"(ptr)
			 : "memory");
	return result;
}

static inline u64 otx2_lmt_flush(uint64_t addr)
{
	u64 result = 0;

	__asm__ volatile(".cpu	generic+lse\n"
			 "ldeor	xzr, %x[rf], [%[rs]]"
			 : [rf]"=r"(result)
			 : [rs]"r"(addr));
	return result;
}

#else
#define otx2_write128(lo, hi, addr)
#define otx2_atomic64_add(incr, ptr)		({ *ptr += incr; })
#define otx2_lmt_flush(addr)			({ 0; })
#endif
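
/* Note (informal): LDADD atomically adds to the target word and returns
 * its previous contents. When "ptr" is an IO address, such as the NPA
 * ALLOC register used below, the returned value is the register's read
 * result for that atomic access rather than a plain counter. The
 * non-arm64 fallbacks above only keep the build working; they are not
 * functionally equivalent to the hardware operations.
 */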
/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
	u64 *ptr = (u64 *)otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
	u64 incr = (u64)aura | BIT_ULL(63);

	return otx2_atomic64_add(incr, ptr);
}

/* Free pointer to a pool/aura */
static inline void otx2_aura_freeptr(struct otx2_nic *pfvf,
				     int aura, s64 buf)
{
	otx2_write128((u64)buf, (u64)aura | BIT_ULL(63),
		      otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0));
}
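
/* Usage sketch (illustrative only; error handling elided): a buffer is
 * pulled from an aura with otx2_aura_allocptr() and handed back to the
 * hardware with otx2_aura_freeptr():
 *
 *	dma_addr_t bufptr = otx2_aura_allocptr(pfvf, aura_id);
 *	if (!bufptr)			// aura empty
 *		return -ENOMEM;
 *	...
 *	otx2_aura_freeptr(pfvf, aura_id, bufptr);
 *
 * The aura number travels in the atomic operand / second 64-bit word,
 * per the NPA_LF_AURA_OP_* register format.
 */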
/* Update page ref count */
static inline void otx2_get_page(struct otx2_pool *pool)
{
	if (!pool->page)
		return;

	if (pool->pageref)
		page_ref_add(pool->page, pool->pageref);
	pool->pageref = 0;
	pool->page = NULL;
}
static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
{
	if (type == AURA_NIX_SQ)
		return pfvf->hw.rqpool_cnt + idx;

	/* AURA_NIX_RQ */
	return idx;
}
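
/* Example (illustrative): RQ and SQ pools share one linear NPA pool
 * space, RQ pools first. With hw.rqpool_cnt == 8, RQ pool 5 maps to
 * index 5 while SQ pool 5 maps to index 8 + 5 = 13.
 */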
/* Mbox APIs */
static inline int otx2_sync_mbox_msg(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}
static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox_up, devid);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);
}
/* Use this API to send mbox msgs in atomic context
 * where sleeping is not allowed
 */
static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox)			\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&mbox->mbox, 0, sizeof(struct _req_type),		\
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
									\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	return req;							\
}

MBOX_MESSAGES
#undef M
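
/* Usage sketch (hedged): the M() expansion above generates one typed
 * allocator per MBOX_MESSAGES entry. A request/response exchange from
 * process context then looks roughly like the following, where
 * otx2_mbox_alloc_msg_nix_lf_start_rx is assumed to come from the
 * NIX_LF_START_RX entry of MBOX_MESSAGES:
 *
 *	otx2_mbox_lock(&pfvf->mbox);
 *	req = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
 *	if (!req) {
 *		otx2_mbox_unlock(&pfvf->mbox);
 *		return -ENOMEM;
 *	}
 *	err = otx2_sync_mbox_msg(&pfvf->mbox);
 *	otx2_mbox_unlock(&pfvf->mbox);
 *
 * with otx2_sync_mbox_msg_busy_poll() substituted when the caller cannot
 * sleep.
 */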
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
int									\
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,		\
				  struct _req_type *req,		\
				  struct _rsp_type *rsp);

MBOX_UP_CGX_MESSAGES
#undef M
/* Time to wait before watchdog kicks off */
#define OTX2_TX_TIMEOUT		(100 * HZ)
#define RVU_PFVF_PF_SHIFT	10
#define RVU_PFVF_PF_MASK	0x3F
#define RVU_PFVF_FUNC_SHIFT	0
#define RVU_PFVF_FUNC_MASK	0x3FF
static inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}
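
/* Worked example (illustrative): pcifunc packs the PF and function
 * numbers as (pf << RVU_PFVF_PF_SHIFT) | func. For pcifunc = 0x0C01,
 * rvu_get_pf() yields (0x0C01 >> 10) & 0x3F = 3, i.e. PF3; the low ten
 * bits hold func = 1, which by RVU convention (func 0 is the PF itself,
 * func n is VF n-1) addresses VF0 of PF3.
 */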
static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
					   struct page *page,
					   size_t offset, size_t size,
					   enum dma_data_direction dir)
{
	dma_addr_t iova;

	iova = dma_map_page_attrs(pfvf->dev, page,
				  offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (unlikely(dma_mapping_error(pfvf->dev, iova)))
		return (dma_addr_t)NULL;
	return iova;
}
static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
				       dma_addr_t addr, size_t size,
				       enum dma_data_direction dir)
{
	dma_unmap_page_attrs(pfvf->dev, addr, size,
			     dir, DMA_ATTR_SKIP_CPU_SYNC);
}
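
/* Note (informal): DMA_ATTR_SKIP_CPU_SYNC skips cache maintenance at
 * map/unmap time, so a caller is expected to sync only the portion of
 * the buffer the CPU actually touches, e.g. (illustrative only):
 *
 *	dma_sync_single_for_cpu(pfvf->dev, iova, len, DMA_FROM_DEVICE);
 *
 * Mapping failure is reported here as a NULL dma_addr_t, so callers
 * check "if (!iova)" rather than calling dma_mapping_error() again.
 */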
/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
int otx2_set_mac_address(struct net_device *netdev, void *p);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
int otx2_detach_resources(struct mbox *mbox);
int otx2_config_npa(struct otx2_nic *pfvf);
int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
void otx2_aura_pool_free(struct otx2_nic *pfvf);
void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
			   gfp_t gfp);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
/* RSS configuration APIs */
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
int otx2_set_rss_table(struct otx2_nic *pfvf);
/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp);
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
				  struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
			    struct cgx_stats_rsp *rsp);
/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
		      struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues);

#endif /* OTX2_COMMON_H */