// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 CGX driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "cgx.h"
#define DRV_NAME	"octeontx2-cgx"
#define DRV_STRING	"Marvell OcteonTX2 CGX/MAC Driver"
/**
 * struct lmac - CGX LMAC port context
 * @wq_cmd_cmplt:	waitq to keep the process blocked until cmd completion
 * @cmd_lock:		Lock to serialize the command interface
 * @resp:		command response
 * @event_cb:		callback for linkchange events
 * @cmd_pend:		flag set before a new command is started,
 *			cleared after the command response is received
 * @cgx:		parent cgx port
 * @lmac_id:		lmac port id
 * @name:		lmac port name
 */
struct lmac {
	wait_queue_head_t wq_cmd_cmplt;
	struct mutex cmd_lock;
	u64 resp;
	struct cgx_event_cb event_cb;
	bool cmd_pend;
	struct cgx *cgx;
	u8 lmac_id;
	char *name;
};
struct cgx {
	void __iomem		*reg_base;
	struct pci_dev		*pdev;
	u8			cgx_id;
	u8			lmac_count;
	struct lmac		*lmac_idmap[MAX_LMAC_PER_CGX];
	struct list_head	cgx_list;
};
static LIST_HEAD(cgx_list);
/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
	{ 0, }	/* end of table */
};

MODULE_DEVICE_TABLE(pci, cgx_id_table);
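/* MMIO accessors: each LMAC's CSRs occupy their own 256 KB window inside
 * the CGX BAR, so the LMAC index is shifted into bit 18 of the offset to
 * select the per-LMAC register block.
 */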
static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
	writeq(val, cgx->reg_base + (lmac << 18) + offset);
}

static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
	return readq(cgx->reg_base + (lmac << 18) + offset);
}
static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
	if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
		return NULL;

	return cgx->lmac_idmap[lmac_id];
}
int cgx_get_cgx_cnt(void)
{
	struct cgx *cgx_dev;
	int count = 0;

	list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
		count++;

	return count;
}
EXPORT_SYMBOL(cgx_get_cgx_cnt);
int cgx_get_lmac_cnt(void *cgxd)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return -ENODEV;

	return cgx->lmac_count;
}
EXPORT_SYMBOL(cgx_get_lmac_cnt);
void *cgx_get_pdata(int cgx_id)
{
	struct cgx *cgx_dev;

	list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
		if (cgx_dev->cgx_id == cgx_id)
			return cgx_dev;
	}
	return NULL;
}
EXPORT_SYMBOL(cgx_get_pdata);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	if (enable)
		cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
	else
		cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
	cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);

	return 0;
}
EXPORT_SYMBOL(cgx_lmac_rx_tx_enable);
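/* Usage sketch (illustrative only, not part of this driver): a consumer
 * such as the RVU AF driver would look up a CGX instance by id and toggle
 * packet I/O on its LMACs; error handling is elided:
 *
 *	void *cgxd = cgx_get_pdata(0);
 *	int lmac;
 *
 *	if (cgxd)
 *		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++)
 *			cgx_lmac_rx_tx_enable(cgxd, lmac, true);
 */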
/* CGX Firmware interface low level support */
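/*
 * Command/response handshake, as implemented below: the caller builds a
 * request word, cgx_fwi_cmd_send() flips the CMDREG_OWN field to firmware
 * ownership, writes the word to CGX_COMMAND_REG and sleeps on
 * wq_cmd_cmplt. Firmware posts the response through CGX_EVENT_REG, which
 * raises the FWI interrupt; cgx_fwi_event_handler() stores the response
 * and wakes the sleeper. cmd_lock guarantees a single outstanding command
 * per LMAC.
 */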
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
	struct cgx *cgx = lmac->cgx;
	struct device *dev;
	int err = 0;
	u64 cmd;

	/* Ensure no other command is in progress */
	err = mutex_lock_interruptible(&lmac->cmd_lock);
	if (err)
		return err;

	/* Ensure command register is free */
	cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
	if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
		err = -EBUSY;
		goto unlock;
	}

	/* Update ownership in command request */
	req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);
	/* Mark this lmac as pending, before we start */
	lmac->cmd_pend = true;
	/* Start command in hardware */
	cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);

	/* Ensure command is completed without errors */
	if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
				msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
		dev = &cgx->pdev->dev;
		dev_err(dev, "cgx port %d:%d cmd timeout\n",
			cgx->cgx_id, lmac->lmac_id);
		err = -EIO;
		goto unlock;
	}

	/* We have a valid command response */
	smp_rmb(); /* Ensure the latest updates are visible */
	*resp = lmac->resp;

unlock:
	mutex_unlock(&lmac->cmd_lock);
	return err;
}
static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
				      struct cgx *cgx, int lmac_id)
{
	struct lmac *lmac;
	int err;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	err = cgx_fwi_cmd_send(req, resp, lmac);
	/* Check for valid response */
	if (!err && FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
		return -EIO;

	return err;
}
/* Hardware event handlers */
static inline void cgx_link_change_handler(u64 lstat,
					   struct lmac *lmac)
{
	struct cgx *cgx = lmac->cgx;
	struct cgx_link_event event;
	struct device *dev;

	dev = &cgx->pdev->dev;

	event.lstat.link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
	event.lstat.full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
	event.lstat.speed = FIELD_GET(RESP_LINKSTAT_SPEED, lstat);
	event.lstat.err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);

	event.cgx_id = cgx->cgx_id;
	event.lmac_id = lmac->lmac_id;

	if (!lmac->event_cb.notify_link_chg) {
		dev_dbg(dev, "cgx port %d:%d Link change handler null\n",
			cgx->cgx_id, lmac->lmac_id);
		if (event.lstat.err_type != CGX_ERR_NONE) {
			dev_err(dev, "cgx port %d:%d Link error %d\n",
				cgx->cgx_id, lmac->lmac_id,
				event.lstat.err_type);
		}
		dev_info(dev, "cgx port %d:%d Link status %s, speed %x\n",
			 cgx->cgx_id, lmac->lmac_id,
			 event.lstat.link_up ? "UP" : "DOWN",
			 event.lstat.speed);
		return;
	}

	if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
		dev_err(dev, "event notification failure\n");
}
static inline bool cgx_cmdresp_is_linkevent(u64 event)
{
	u8 id;

	id = FIELD_GET(EVTREG_ID, event);

	return (id == CGX_CMD_LINK_BRING_UP ||
		id == CGX_CMD_LINK_BRING_DOWN);
}

static inline bool cgx_event_is_linkevent(u64 event)
{
	return FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE;
}
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
	struct lmac *lmac = data;
	struct cgx *cgx;
	u64 event;

	cgx = lmac->cgx;
	event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);

	if (!FIELD_GET(EVTREG_ACK, event))
		return IRQ_NONE;

	switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
	case CGX_EVT_CMD_RESP:
		/* Copy the response. Since only one command is active at a
		 * time, there is no way a response can get overwritten
		 */
		lmac->resp = event;
		/* Ensure response is updated before thread context starts */
		smp_wmb();

		/* There won't be separate events for link changes initiated
		 * from software; hence report the command responses as events
		 */
		if (cgx_cmdresp_is_linkevent(event))
			cgx_link_change_handler(event, lmac);

		/* Release thread waiting for completion */
		lmac->cmd_pend = false;
		wake_up_interruptible(&lmac->wq_cmd_cmplt);
		break;
	case CGX_EVT_ASYNC:
		if (cgx_event_is_linkevent(event))
			cgx_link_change_handler(event, lmac);
		break;
	}

	/* Any new event or command response will be posted by firmware
	 * only after the current status is acked.
	 * Ack the interrupt register as well.
	 */
	cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
	cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT);

	return IRQ_HANDLED;
}
/* APIs for PHY management using CGX firmware interface */

/* Callback registration for hardware events like link change */
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	struct lmac *lmac;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	lmac->event_cb = *cb;

	return 0;
}
EXPORT_SYMBOL(cgx_lmac_evh_register);
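/* Registration sketch (hypothetical callback, for illustration only):
 *
 *	static int my_link_notify(struct cgx_link_event *event, void *data)
 *	{
 *		pr_info("cgx%d lmac%d link %s\n", event->cgx_id,
 *			event->lmac_id,
 *			event->lstat.link_up ? "up" : "down");
 *		return 0;
 *	}
 *
 *	struct cgx_event_cb cb = {
 *		.notify_link_chg = my_link_notify,
 *		.data = NULL,
 *	};
 *
 *	cgx_lmac_evh_register(&cb, cgxd, 0);
 */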
static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
	u64 req = 0;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
	return cgx_fwi_cmd_generic(req, resp, cgx, 0);
}
static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
{
	struct device *dev = &cgx->pdev->dev;
	int major_ver, minor_ver;
	u64 resp;
	int err;

	if (!cgx->lmac_count)
		return 0;

	err = cgx_fwi_read_version(&resp, cgx);
	if (err)
		return err;

	major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
	minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
	dev_dbg(dev, "Firmware command interface version = %d.%d\n",
		major_ver, minor_ver);
	if (major_ver != CGX_FIRMWARE_MAJOR_VER ||
	    minor_ver != CGX_FIRMWARE_MINOR_VER)
		return -EIO;

	return 0;
}
static int cgx_lmac_init(struct cgx *cgx)
{
	struct lmac *lmac;
	int i, err;

	cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7;
	if (cgx->lmac_count > MAX_LMAC_PER_CGX)
		cgx->lmac_count = MAX_LMAC_PER_CGX;

	for (i = 0; i < cgx->lmac_count; i++) {
		lmac = kcalloc(1, sizeof(struct lmac), GFP_KERNEL);
		if (!lmac)
			return -ENOMEM;
		lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
		if (!lmac->name)
			return -ENOMEM;
		sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
		lmac->lmac_id = i;
		lmac->cgx = cgx;
		init_waitqueue_head(&lmac->wq_cmd_cmplt);
		mutex_init(&lmac->cmd_lock);
		err = request_irq(pci_irq_vector(cgx->pdev,
						 CGX_LMAC_FWI + i * 9),
				  cgx_fwi_event_handler, 0, lmac->name, lmac);
		if (err)
			return err;

		/* Enable interrupt */
		cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
			  FW_CGX_INT);

		/* Add reference */
		cgx->lmac_idmap[i] = lmac;
	}

	return cgx_lmac_verify_fwi_version(cgx);
}
static int cgx_lmac_exit(struct cgx *cgx)
{
	struct lmac *lmac;
	int i;

	/* Free all lmac related resources */
	for (i = 0; i < cgx->lmac_count; i++) {
		lmac = cgx->lmac_idmap[i];
		if (!lmac)
			continue;
		free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac);
		kfree(lmac->name);
		kfree(lmac);
	}

	return 0;
}
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct cgx *cgx;
	int err, nvec;

	cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
	if (!cgx)
		return -ENOMEM;
	cgx->pdev = pdev;

	pci_set_drvdata(pdev, cgx);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* MAP configuration registers */
	cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!cgx->reg_base) {
		dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	nvec = CGX_NVEC;
	err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (err < 0 || err != nvec) {
		dev_err(dev, "Request for %d msix vectors failed, err %d\n",
			nvec, err);
		goto err_release_regions;
	}

	list_add(&cgx->cgx_list, &cgx_list);
	cgx->cgx_id = cgx_get_cgx_cnt() - 1;

	err = cgx_lmac_init(cgx);
	if (err)
		goto err_release_lmac;

	return 0;

err_release_lmac:
	cgx_lmac_exit(cgx);
	list_del(&cgx->cgx_list);
	pci_free_irq_vectors(pdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void cgx_remove(struct pci_dev *pdev)
{
	struct cgx *cgx = pci_get_drvdata(pdev);

	cgx_lmac_exit(cgx);
	list_del(&cgx->cgx_list);
	pci_free_irq_vectors(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
struct pci_driver cgx_driver = {
	.name		= DRV_NAME,
	.id_table	= cgx_id_table,
	.probe		= cgx_probe,
	.remove		= cgx_remove,
};
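/* Note: there is no module_pci_driver() boilerplate here. cgx_driver is
 * intentionally non-static, on the assumption that a controlling module
 * (the RVU AF driver in this device family) registers and unregisters it.
 */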