void __iomem *reg_base;
struct pci_dev *pdev;
u8 cgx_id;
+ u8 lmac_count;
+ struct list_head cgx_list;
};
+static LIST_HEAD(cgx_list);
+
/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
{ 0, } /* end of table */
};

MODULE_DEVICE_TABLE(pci, cgx_id_table);
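+
+/* Each LMAC owns its own CSR window within the CGX BAR; the LMAC index
+ * selects bits [19:18] of the register offset (a 256KB stride), hence
+ * the (lmac << 18) term below.
+ */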
+static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
+{
+ return readq(cgx->reg_base + (lmac << 18) + offset);
+}
+
+int cgx_get_cgx_cnt(void)
+{
+ struct cgx *cgx_dev;
+ int count = 0;
+
+ list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
+ count++;
+
+ return count;
+}
+EXPORT_SYMBOL(cgx_get_cgx_cnt);
+
+int cgx_get_lmac_cnt(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx)
+ return -ENODEV;
+
+ return cgx->lmac_count;
+}
+EXPORT_SYMBOL(cgx_get_lmac_cnt);
+
+void *cgx_get_pdata(int cgx_id)
+{
+ struct cgx *cgx_dev;
+
+ list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
+ if (cgx_dev->cgx_id == cgx_id)
+ return cgx_dev;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(cgx_get_pdata);
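+
+/* Typical consumer usage, e.g. from the RVU AF driver:
+ *
+ *	void *cgxd = cgx_get_pdata(cgx_id);
+ *	int nlmacs = cgx_get_lmac_cnt(cgxd);
+ */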
+
+static void cgx_lmac_init(struct cgx *cgx)
+{
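+ /* CGXX_CMRX_RX_LMACS[2:0] advertises the number of LMACs present on
+  * this CGX instance; clamp it to what the driver supports.
+  */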
+ cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7;
+ if (cgx->lmac_count > MAX_LMAC_PER_CGX)
+ cgx->lmac_count = MAX_LMAC_PER_CGX;
+}
+
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
goto err_release_regions;
}
+ list_add(&cgx->cgx_list, &cgx_list);
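+ /* cgx_id reflects probe order: with this device now on the list,
+  * count - 1 is its zero-based index.
+  */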
+ cgx->cgx_id = cgx_get_cgx_cnt() - 1;
+ cgx_lmac_init(cgx);
+
return 0;
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
pci_disable_device(pdev);
static void cgx_remove(struct pci_dev *pdev)
{
+ struct cgx *cgx = pci_get_drvdata(pdev);
+
+ list_del(&cgx->cgx_list);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
#define CGX_H
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_CGX	0xA059
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM	0
+
+#define MAX_CGX 3
+#define MAX_LMAC_PER_CGX 4
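+/* Flat index of LMAC0 on CGX instance 'x' in per-LMAC arrays */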
+#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
+
+/* Registers */
+#define CGXX_CMRX_RX_ID_MAP 0x060
+#define CGXX_CMRX_RX_LMACS 0x128
extern struct pci_driver cgx_driver;
+int cgx_get_cgx_cnt(void);
+int cgx_get_lmac_cnt(void *cgxd);
+void *cgx_get_pdata(int cgx_id);
#endif /* CGX_H */
char *irq_name;
bool *irq_allocated;
dma_addr_t msix_base_iova;
+
+ /* CGX */
+#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */
+ u8 cgx_mapped_pfs;
+ u8 cgx_cnt; /* available cgx ports */
+ u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
+ u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for
+ * every cgx lmac port
+ */
+ void **cgx_idmap; /* cgx id to cgx data map table */
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
+/* CGX APIs */
+int rvu_cgx_probe(struct rvu *rvu);
#endif /* RVU_H */
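
/* Illustration only, not part of the patch: a minimal sketch of how
 * rvu_cgx_probe() could populate the CGX fields of struct rvu using the
 * cgx_get_*() APIs added above. The cgxlmac_id_to_bmap() helper, the
 * plain kcalloc() allocations and the map sizes are assumptions made
 * for this sketch.
 */
static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
	/* Pack (cgx_id, lmac_id) into one byte: cgx in [7:4], lmac in [3:0] */
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}

int rvu_cgx_probe(struct rvu *rvu)
{
	int cgx, lmac, pf = PF_CGXMAP_BASE;
	void *cgxd;

	rvu->cgx_cnt = cgx_get_cgx_cnt();
	if (!rvu->cgx_cnt)
		return 0;

	rvu->cgx_idmap = kcalloc(rvu->cgx_cnt, sizeof(void *), GFP_KERNEL);
	rvu->pf2cgxlmac_map = kcalloc(PF_CGXMAP_BASE +
				      MAX_CGX * MAX_LMAC_PER_CGX,
				      sizeof(u8), GFP_KERNEL);
	rvu->cgxlmac2pf_map = kcalloc(MAX_CGX * MAX_LMAC_PER_CGX,
				      sizeof(u16), GFP_KERNEL);
	if (!rvu->cgx_idmap || !rvu->pf2cgxlmac_map || !rvu->cgxlmac2pf_map)
		return -ENOMEM;

	/* Remember each CGX instance's driver data for later lookups */
	for (cgx = 0; cgx < rvu->cgx_cnt; cgx++)
		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);

	/* Map one RVU PF to every physical CGX LMAC port; PF0 stays with
	 * the RVU AF, so mapping starts at PF_CGXMAP_BASE.
	 */
	for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) {
		cgxd = rvu->cgx_idmap[cgx];
		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++, pf++) {
			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
			rvu->cgx_mapped_pfs++;
		}
	}
	return 0;
}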