// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"

struct cgx_evq_entry {
        struct list_head evq_node;
        struct cgx_link_event link_event;
};

static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
        return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}

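/* Worked example (values illustrative): cgx_id = 1, lmac_id = 3 packs to
 * (1 << 4) | 3 = 0x13, i.e. the CGX id occupies the upper nibble and the
 * LMAC id the lower nibble of a single u8. This nibble packing is why
 * rvu_map_cgx_lmac_pf() below rejects counts that do not fit in 4 bits.
 */
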
static void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
        if (cgx_id >= rvu->cgx_cnt)
                return NULL;

        return rvu->cgx_idmap[cgx_id];
}

static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
        int cgx_cnt = rvu->cgx_cnt;
        int cgx, lmac_cnt, lmac;
        int pf = PF_CGXMAP_BASE;
        int size;

        if (!cgx_cnt)
                return 0;

        if (cgx_cnt > 0xF || MAX_LMAC_PER_CGX > 0xF)
                return -EINVAL;

        /* Alloc map table
         * An additional entry is required since PF id starts from 1 and
         * hence entry at offset 0 is invalid.
         */
        size = (cgx_cnt * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
        rvu->pf2cgxlmac_map = devm_kzalloc(rvu->dev, size, GFP_KERNEL);
        if (!rvu->pf2cgxlmac_map)
                return -ENOMEM;

        /* Initialize offset 0 with an invalid cgx and lmac id */
        rvu->pf2cgxlmac_map[0] = 0xFF;

        /* Reverse map table */
        rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
                                  cgx_cnt * MAX_LMAC_PER_CGX * sizeof(u16),
                                  GFP_KERNEL);
        if (!rvu->cgxlmac2pf_map)
                return -ENOMEM;

        rvu->cgx_mapped_pfs = 0;
        for (cgx = 0; cgx < cgx_cnt; cgx++) {
                lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
                for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
                        rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
                        rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
                        rvu->cgx_mapped_pfs++;
                }
        }
        return 0;
}

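/* Sketch of the resulting tables for a hypothetical box with two CGX
 * devices exposing two LMACs each (PF ids start at 1 per the comment
 * above; CGX_OFFSET(cgx) is assumed to expand to cgx * MAX_LMAC_PER_CGX):
 *
 *   pf2cgxlmac_map[0..4] = 0xFF, 0x00, 0x01, 0x10, 0x11
 *   cgxlmac2pf_map[CGX_OFFSET(0) + 0] = 1 << 1, ...,
 *   cgxlmac2pf_map[CGX_OFFSET(1) + 1] = 1 << 4
 *
 * Remaining reverse-map slots (unpopulated LMACs) stay zero, i.e. no PF.
 */
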
/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
        struct cgx_evq_entry *qentry;
        struct rvu *rvu = data;

        /* post event to the event queue */
        qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
        if (!qentry)
                return -ENOMEM;
        qentry->link_event = *event;
        spin_lock(&rvu->cgx_evq_lock);
        list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
        spin_unlock(&rvu->cgx_evq_lock);

        /* start worker to process the events */
        queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

        return 0;
}

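/* Design note: the posting side runs in hard-IRQ context, so a plain
 * spin_lock() and a GFP_ATOMIC allocation are sufficient there. The worker
 * below runs in process context and must take the same lock with
 * spin_lock_irqsave(); otherwise the posting interrupt could fire on the
 * same CPU while the lock is held and deadlock on cgx_evq_lock.
 */
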
static void cgx_evhandler_task(struct work_struct *work)
{
        struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
        struct cgx_evq_entry *qentry;
        struct cgx_link_event *event;
        unsigned long flags;

        do {
                /* Dequeue an event */
                spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
                qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
                                                  struct cgx_evq_entry,
                                                  evq_node);
                if (qentry)
                        list_del(&qentry->evq_node);
                spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
                if (!qentry)
                        break; /* nothing more to process */

                event = &qentry->link_event;

                /* Do nothing for now */
                kfree(qentry);
        } while (1);
}

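/* Design note: queue_work() is a no-op when the work item is already
 * pending, so the handler drains the whole list in one pass instead of
 * processing a single entry per invocation; entries posted meanwhile are
 * caught by this pass or by the next queue_work().
 */
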
static void cgx_lmac_event_handler_init(struct rvu *rvu)
{
        struct cgx_event_cb cb;
        int cgx, lmac, err;
        void *cgxd;

        spin_lock_init(&rvu->cgx_evq_lock);
        INIT_LIST_HEAD(&rvu->cgx_evq_head);
        INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
        rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
        if (!rvu->cgx_evh_wq) {
                dev_err(rvu->dev, "alloc workqueue failed");
                return;
        }

        cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
        cb.data = rvu;

        for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) {
                cgxd = rvu_cgx_pdata(cgx, rvu);
                for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
                        err = cgx_lmac_evh_register(&cb, cgxd, lmac);
                        if (err)
                                dev_err(rvu->dev,
                                        "%d:%d handler register failed\n",
                                        cgx, lmac);
                }
        }
}

void rvu_cgx_wq_destroy(struct rvu *rvu)
{
        if (rvu->cgx_evh_wq) {
                flush_workqueue(rvu->cgx_evh_wq);
                destroy_workqueue(rvu->cgx_evh_wq);
                rvu->cgx_evh_wq = NULL;
        }
}

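/* Only the workqueue needs explicit teardown here; the PF/LMAC map tables
 * were allocated with devm_kzalloc() and are freed automatically when the
 * AF device is unbound.
 */
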
int rvu_cgx_probe(struct rvu *rvu)
{
        int i, err;

        /* find available cgx ports */
        rvu->cgx_cnt = cgx_get_cgx_cnt();
        if (!rvu->cgx_cnt) {
                dev_info(rvu->dev, "No CGX devices found!\n");
                return -ENODEV;
        }

        rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt * sizeof(void *),
                                      GFP_KERNEL);
        if (!rvu->cgx_idmap)
                return -ENOMEM;

        /* Initialize the cgxdata table */
        for (i = 0; i < rvu->cgx_cnt; i++)
                rvu->cgx_idmap[i] = cgx_get_pdata(i);

        /* Map CGX LMAC interfaces to RVU PFs */
        err = rvu_map_cgx_lmac_pf(rvu);
        if (err)
                return err;

        /* Register for CGX events */
        cgx_lmac_event_handler_init(rvu);
        return 0;
}

int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
        int pf = rvu_get_pf(pcifunc);
        u8 cgx_id, lmac_id;

        /* This msg is expected only from PFs that are mapped to CGX LMACs,
         * if received from other PF/VF simply ACK, nothing to do.
         */
        if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
                return -ENODEV;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);
        return 0;
}

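/* Worked example: a request from a VF (nonzero pcifunc & RVU_PFVF_FUNC_MASK)
 * or from a PF with no CGX LMAC attached returns -ENODEV above; the mbox
 * handlers below ignore that result and still ACK the message. For a
 * CGX-mapped PF, pf2cgxlmac_map[pf] is decoded back into cgx_id/lmac_id and
 * the LMAC Rx/Tx path is toggled per 'start'.
 */
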
int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
                                    struct msg_rsp *rsp)
{
        rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
        return 0;
}

int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
                                   struct msg_rsp *rsp)
{
        rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
        return 0;
}