octeontx2-af: CGX Rx/Tx enable/disable mbox handlers
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"

struct cgx_evq_entry {
        struct list_head evq_node;
        struct cgx_link_event link_event;
};

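/* Pack a CGX id and LMAC id into a single byte: CGX id in the upper
 * nibble, LMAC id in the lower nibble.
 */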
static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
        return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}

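/* Return the private data of the CGX device with the given id, or NULL
 * if the id is out of range.
 */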
static void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
        if (cgx_id >= rvu->cgx_cnt)
                return NULL;

        return rvu->cgx_idmap[cgx_id];
}

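/* Build the PF <-> CGX LMAC mapping tables. Each CGX LMAC is assigned to
 * one RVU PF, starting from PF_CGXMAP_BASE.
 */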
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
        int cgx_cnt = rvu->cgx_cnt;
        int cgx, lmac_cnt, lmac;
        int pf = PF_CGXMAP_BASE;
        int size;

        if (!cgx_cnt)
                return 0;

        if (cgx_cnt > 0xF || MAX_LMAC_PER_CGX > 0xF)
                return -EINVAL;

        /* Alloc map table
         * An additional entry is required since PF id starts from 1 and
         * hence entry at offset 0 is invalid.
         */
        size = (cgx_cnt * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
        rvu->pf2cgxlmac_map = devm_kzalloc(rvu->dev, size, GFP_KERNEL);
        if (!rvu->pf2cgxlmac_map)
                return -ENOMEM;

        /* Initialize offset 0 with an invalid cgx and lmac id */
        rvu->pf2cgxlmac_map[0] = 0xFF;

        /* Reverse map table */
        rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
                                  cgx_cnt * MAX_LMAC_PER_CGX * sizeof(u16),
                                  GFP_KERNEL);
        if (!rvu->cgxlmac2pf_map)
                return -ENOMEM;

        rvu->cgx_mapped_pfs = 0;
        for (cgx = 0; cgx < cgx_cnt; cgx++) {
                lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
                for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
                        rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
                        rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
                        rvu->cgx_mapped_pfs++;
                }
        }
        return 0;
}

/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
        struct cgx_evq_entry *qentry;
        struct rvu *rvu = data;

        /* post event to the event queue */
        qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
        if (!qentry)
                return -ENOMEM;
        qentry->link_event = *event;
        spin_lock(&rvu->cgx_evq_lock);
        list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
        spin_unlock(&rvu->cgx_evq_lock);

        /* start worker to process the events */
        queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

        return 0;
}

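/* Worker that drains the link event queue filled by cgx_lmac_postevent().
 * Events are dequeued under the queue lock and, for now, simply freed.
 */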
static void cgx_evhandler_task(struct work_struct *work)
{
        struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
        struct cgx_evq_entry *qentry;
        struct cgx_link_event *event;
        unsigned long flags;

        do {
                /* Dequeue an event */
                spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
                qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
                                                  struct cgx_evq_entry,
                                                  evq_node);
                if (qentry)
                        list_del(&qentry->evq_node);
                spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
                if (!qentry)
                        break; /* nothing more to process */

                event = &qentry->link_event;

                /* Do nothing for now */
                kfree(qentry);
        } while (1);
}

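/* Set up the CGX link event queue and worker, and register the link change
 * callback with every LMAC on every CGX device.
 */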
static void cgx_lmac_event_handler_init(struct rvu *rvu)
{
        struct cgx_event_cb cb;
        int cgx, lmac, err;
        void *cgxd;

        spin_lock_init(&rvu->cgx_evq_lock);
        INIT_LIST_HEAD(&rvu->cgx_evq_head);
        INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
        rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
        if (!rvu->cgx_evh_wq) {
                dev_err(rvu->dev, "alloc workqueue failed\n");
                return;
        }

        cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
        cb.data = rvu;

        for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) {
                cgxd = rvu_cgx_pdata(cgx, rvu);
                for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
                        err = cgx_lmac_evh_register(&cb, cgxd, lmac);
                        if (err)
                                dev_err(rvu->dev,
                                        "%d:%d handler register failed\n",
                                        cgx, lmac);
                }
        }
}

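/* Flush and destroy the CGX event handler workqueue */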
void rvu_cgx_wq_destroy(struct rvu *rvu)
{
        if (rvu->cgx_evh_wq) {
                flush_workqueue(rvu->cgx_evh_wq);
                destroy_workqueue(rvu->cgx_evh_wq);
                rvu->cgx_evh_wq = NULL;
        }
}

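/* Discover CGX devices, build the CGX id and PF mapping tables and register
 * for link change events.
 */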
int rvu_cgx_probe(struct rvu *rvu)
{
        int i, err;

        /* find available cgx ports */
        rvu->cgx_cnt = cgx_get_cgx_cnt();
        if (!rvu->cgx_cnt) {
                dev_info(rvu->dev, "No CGX devices found!\n");
                return -ENODEV;
        }

        rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt * sizeof(void *),
                                      GFP_KERNEL);
        if (!rvu->cgx_idmap)
                return -ENOMEM;

        /* Initialize the cgxdata table */
        for (i = 0; i < rvu->cgx_cnt; i++)
                rvu->cgx_idmap[i] = cgx_get_pdata(i);

        /* Map CGX LMAC interfaces to RVU PFs */
        err = rvu_map_cgx_lmac_pf(rvu);
        if (err)
                return err;

        /* Register for CGX events */
        cgx_lmac_event_handler_init(rvu);
        return 0;
}

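/* Enable or disable packet Rx/Tx on the CGX LMAC mapped to the given PF;
 * 'start' selects enable (true) or disable (false).
 */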
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
        int pf = rvu_get_pf(pcifunc);
        u8 cgx_id, lmac_id;

        /* This msg is expected only from PFs that are mapped to CGX LMACs,
         * if received from other PF/VF simply ACK, nothing to do.
         */
        if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
                return -ENODEV;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

        cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);

        return 0;
}

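/* Mbox handlers: start/stop CGX Rx/Tx for the PF that sent the request */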
int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
                                    struct msg_rsp *rsp)
{
        rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
        return 0;
}

int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
                                   struct msg_rsp *rsp)
{
        rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
        return 0;
}