/* SPDX-License-Identifier: GPL-2.0
 * Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
14 #include <linux/etherdevice.h>
15 #include <linux/sizes.h>
17 #include "rvu_struct.h"
#define MBOX_SIZE SZ_64K

/* Mailbox memory layout: four fixed sub-regions inside the 64KB window.
 * "DOWN" carries PF->AF (or VF->PF) traffic, "UP" carries the reverse.
 */
/* AF/PF: PF initiated, PF/VF VF initiated */
#define MBOX_DOWN_RX_START 0
#define MBOX_DOWN_RX_SIZE (46 * SZ_1K)
#define MBOX_DOWN_TX_START (MBOX_DOWN_RX_START + MBOX_DOWN_RX_SIZE)
#define MBOX_DOWN_TX_SIZE (16 * SZ_1K)
/* AF/PF: AF initiated, PF/VF PF initiated */
#define MBOX_UP_RX_START (MBOX_DOWN_TX_START + MBOX_DOWN_TX_SIZE)
#define MBOX_UP_RX_SIZE SZ_1K
#define MBOX_UP_TX_START (MBOX_UP_RX_START + MBOX_UP_RX_SIZE)
#define MBOX_UP_TX_SIZE SZ_1K

/* The sub-regions must exactly tile MBOX_SIZE; the original copy was
 * missing the #endif, which made the whole header fail to preprocess.
 */
#if MBOX_UP_TX_SIZE + MBOX_UP_TX_START != MBOX_SIZE
# error "incorrect mailbox area sizes"
#endif
/* Bitmask with the low 'pfvfs' bits set; all-ones when pfvfs >= 64,
 * since shifting a 64-bit value by 64 would be undefined behavior.
 * 'pfvfs' is parenthesized so expression arguments expand safely.
 */
#define INTR_MASK(pfvfs) (((pfvfs) < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
#define MBOX_RSP_TIMEOUT 1000 /* in ms, Time to wait for mbox response */
#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */
/* Mailbox directions: which pair of endpoints a mailbox instance serves
 * and who initiates. The *_UP variants are the reverse (AF/PF initiated)
 * channel of the same pairing.
 */
#define MBOX_DIR_AFPF 0 /* AF replies to PF */
#define MBOX_DIR_PFAF 1 /* PF sends messages to AF */
#define MBOX_DIR_PFVF 2 /* PF replies to VF */
#define MBOX_DIR_VFPF 3 /* VF sends messages to PF */
#define MBOX_DIR_AFPF_UP 4 /* AF sends messages to PF */
#define MBOX_DIR_PFAF_UP 5 /* PF replies to AF */
#define MBOX_DIR_PFVF_UP 6 /* PF sends messages to VF */
#define MBOX_DIR_VFPF_UP 7 /* VF replies to PF */
53 struct otx2_mbox_dev {
54 void *mbase; /* This dev's mbox region */
56 u16 msg_size; /* Total msg size to be sent */
57 u16 rsp_size; /* Total rsp size to be sure the reply is ok */
58 u16 num_msgs; /* No of msgs sent or waiting for response */
59 u16 msgs_acked; /* No of msgs for which response is received */
64 void *hwbase; /* Mbox region advertised by HW */
65 void *reg_base;/* CSR base for this dev */
66 u64 trigger; /* Trigger mbox notification */
67 u16 tr_shift; /* Mbox trigger shift */
68 u64 rx_start; /* Offset of Rx region in mbox memory */
69 u64 tx_start; /* Offset of Tx region in mbox memory */
70 u16 rx_size; /* Size of Rx region */
71 u16 tx_size; /* Size of Tx region */
72 u16 ndevs; /* The number of peers */
73 struct otx2_mbox_dev *dev;
76 /* Header which preceeds all mbox messages */
78 u16 num_msgs; /* No of msgs embedded */
81 /* Header which preceeds every msg and is also part of it */
83 u16 pcifunc; /* Who's sending this msg */
84 u16 id; /* Mbox message ID */
85 #define OTX2_MBOX_REQ_SIG (0xdead)
86 #define OTX2_MBOX_RSP_SIG (0xbeef)
87 u16 sig; /* Signature, for validating corrupted msgs */
88 #define OTX2_MBOX_VERSION (0x0001)
89 u16 ver; /* Version of msg's structure for this ID */
90 u16 next_msgoff; /* Offset of next msg within mailbox region */
91 int rc; /* Msg process'ed response code */
/* Mailbox core API (implementations live outside this header).
 * 'devid' selects which peer's area within the mailbox memory is used.
 */
void otx2_mbox_reset(struct otx2_mbox *mbox, int devid);
void otx2_mbox_destroy(struct otx2_mbox *mbox);
/* Set up 'mbox' over HW region 'hwbase' for 'ndevs' peers; 'direction'
 * is one of the MBOX_DIR_* values above.
 */
int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
		   struct pci_dev *pdev, void __force *reg_base,
		   int direction, int ndevs);
/* Notify peer 'devid' that messages are queued for it. */
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
/* Wait for the peer's response — sleeping vs. busy-polling variants. */
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
/* Reserve space for a request of 'size' bytes plus 'size_rsp' bytes
 * presumably reserved for its reply; returns the new msg header.
 */
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
					    int size, int size_rsp);
/* Fetch the response the peer wrote for request 'msg'. */
struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
				      struct mbox_msghdr *msg);
/* Queue a reply flagging request 'id' from 'pcifunc' as invalid. */
int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid,
			   u16 pcifunc, u16 id);
/* True if messages are pending for peer 'devid'. */
bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid);
/* Printable name for a MBOX_MSG_* id (for logging). */
const char *otx2_mbox_id2name(u16 id);
/* Allocate a request msg with no dedicated response area (rsp size 0).
 * Convenience wrapper around otx2_mbox_alloc_msg_rsp(). The parameter
 * list tail and function braces were missing in this copy and have
 * been restored from the visible call.
 */
static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
						      int devid, int size)
{
	return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
}
/* Mailbox message types */
#define MBOX_MSG_MASK 0xFFFF
#define MBOX_MSG_INVALID 0xFFFE
#define MBOX_MSG_MAX 0xFFFF
/* X-macro table: M(NAME, id, request struct, response struct).
 * Consumers #define M(...) to expand this list into message-ID enums,
 * handler tables, etc.
 */
#define MBOX_MESSAGES \
/* Generic mbox IDs (range 0x000 - 0x1FF) */ \
M(READY, 0x001, msg_req, ready_msg_rsp) \
M(ATTACH_RESOURCES, 0x002, rsrc_attach, msg_rsp) \
M(DETACH_RESOURCES, 0x003, rsrc_detach, msg_rsp) \
M(MSIX_OFFSET, 0x004, msg_req, msix_offset_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, msg_req, msg_rsp) \
M(CGX_STOP_RXTX, 0x201, msg_req, msg_rsp) \
M(CGX_STATS, 0x202, msg_req, cgx_stats_rsp) \
M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set_or_get, \
cgx_mac_addr_set_or_get) \
M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_set_or_get, \
cgx_mac_addr_set_or_get) \
M(CGX_PROMISC_ENABLE, 0x205, msg_req, msg_rsp) \
M(CGX_PROMISC_DISABLE, 0x206, msg_req, msg_rsp) \
M(CGX_START_LINKEVENTS, 0x207, msg_req, msg_rsp) \
M(CGX_STOP_LINKEVENTS, 0x208, msg_req, msg_rsp) \
M(CGX_GET_LINKINFO, 0x209, msg_req, cgx_link_info_msg) \
M(CGX_INTLBK_ENABLE, 0x20A, msg_req, msg_rsp) \
M(CGX_INTLBK_DISABLE, 0x20B, msg_req, msg_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc_req, npa_lf_alloc_rsp) \
M(NPA_LF_FREE, 0x401, msg_req, msg_rsp) \
M(NPA_AQ_ENQ, 0x402, npa_aq_enq_req, npa_aq_enq_rsp) \
M(NPA_HWCTX_DISABLE, 0x403, hwctx_disable_req, msg_rsp) \
/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc_req, nix_lf_alloc_rsp) \
M(NIX_LF_FREE, 0x8001, msg_req, msg_rsp) \
M(NIX_AQ_ENQ, 0x8002, nix_aq_enq_req, nix_aq_enq_rsp) \
M(NIX_HWCTX_DISABLE, 0x8003, hwctx_disable_req, msg_rsp) \
M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free_req, msg_rsp) \
M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_config, msg_rsp) \
M(NIX_STATS_RST, 0x8007, msg_req, msg_rsp)
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
M(CGX_LINK_EVENT, 0xC00, cgx_link_info_msg, msg_rsp)
/* Expander turning each M() entry into "MBOX_MSG_<name> = <id>,".
 * NOTE(review): the enclosing "enum { ... MBOX_MESSAGES ... #undef M };"
 * block appears to be missing from this copy — verify against upstream.
 */
#define M(_name, _id, _1, _2) MBOX_MSG_ ## _name = _id,
172 /* Mailbox message formats */
174 #define RVU_DEFAULT_PF_FUNC 0xFFFF
176 /* Generic request msg used for those mbox messages which
177 * don't send any data in the request.
180 struct mbox_msghdr hdr;
183 /* Generic rsponse msg used a ack or response for those mbox
184 * messages which doesn't have a specific rsp msg format.
187 struct mbox_msghdr hdr;
190 struct ready_msg_rsp {
191 struct mbox_msghdr hdr;
192 u16 sclk_feq; /* SCLK frequency */
/* Structure for requesting resource provisioning.
 * 'modify' flag to be used when either requesting more
 * or to detach partial of a certain resource type.
 * Rest of the fields specify how many of what type to
 * attach.
 * NOTE(review): the "struct rsrc_attach {" opener, its count/flag
 * fields, and the closing brace appear to be missing from this copy —
 * verify against upstream.
 */
struct mbox_msghdr hdr;
/* Structure for relinquishing resources.
 * 'partial' flag to be used when relinquishing all resources
 * but only of a certain type. If not set, all resources of all
 * types provisioned to the RVU function will be detached.
 * NOTE(review): the "struct rsrc_detach {" opener, its flag fields, and
 * the closing brace appear to be missing from this copy.
 */
struct mbox_msghdr hdr;
#define MSIX_VECTOR_INVALID 0xFFFF
#define MAX_RVU_BLKLF_CNT 256
/* MSI-X vector offsets per LF of each block type. */
struct msix_offset_rsp {
struct mbox_msghdr hdr;
u16 sso_msixoff[MAX_RVU_BLKLF_CNT];
u16 ssow_msixoff[MAX_RVU_BLKLF_CNT];
u16 timlf_msixoff[MAX_RVU_BLKLF_CNT];
u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
/* NOTE(review): count fields, NPA/NIX offset members, and the closing
 * brace appear to be missing from this copy — verify against upstream.
 */
245 /* CGX mbox message formats */
247 struct cgx_stats_rsp {
248 struct mbox_msghdr hdr;
249 #define CGX_RX_STATS_COUNT 13
250 #define CGX_TX_STATS_COUNT 18
251 u64 rx_stats[CGX_RX_STATS_COUNT];
252 u64 tx_stats[CGX_TX_STATS_COUNT];
255 /* Structure for requesting the operation for
256 * setting/getting mac address in the CGX interface
258 struct cgx_mac_addr_set_or_get {
259 struct mbox_msghdr hdr;
260 u8 mac_addr[ETH_ALEN];
/* Link state reported to mbox users for one CGX LMAC. */
struct cgx_link_user_info {
uint64_t full_duplex:1;
uint64_t lmac_type_id:4;
uint64_t speed:20; /* speed in Mbps */
#define LMACTYPE_STR_LEN 16
char lmac_type[LMACTYPE_STR_LEN];
/* NOTE(review): this struct looks truncated (leading bit-field(s) and
 * the closing brace are not visible) — verify against upstream.
 */
struct cgx_link_info_msg {
struct mbox_msghdr hdr;
struct cgx_link_user_info link_info;
/* NOTE(review): closing brace missing in this copy. */
/* NPA mbox message formats */

/* NPA mailbox error codes.
 * NOTE(review): the enum opener (e.g. "enum npa_af_status {") and the
 * closing "};" appear to be missing from this copy — verify upstream.
 */
NPA_AF_ERR_PARAM = -301,
NPA_AF_ERR_AQ_FULL = -302,
NPA_AF_ERR_AQ_ENQUEUE = -303,
NPA_AF_ERR_AF_LF_INVALID = -304,
NPA_AF_ERR_AF_LF_ALLOC = -305,
NPA_AF_ERR_LF_RESET = -306,
/* For NPA LF context alloc and init */
struct npa_lf_alloc_req {
struct mbox_msghdr hdr;
int aura_sz; /* No of auras */
u32 nr_pools; /* No of pools */
/* NOTE(review): closing brace of npa_lf_alloc_req missing in this copy. */
struct npa_lf_alloc_rsp {
struct mbox_msghdr hdr;
u32 stack_pg_ptrs; /* No of ptrs per stack page */
u32 stack_pg_bytes; /* Size of stack page */
u16 qints; /* NPA_AF_CONST::QINTS */
/* NOTE(review): closing brace of npa_lf_alloc_rsp missing in this copy. */
/* NPA AQ enqueue msg */
struct npa_aq_enq_req {
struct mbox_msghdr hdr;
/* NOTE(review): selector fields (aura id, ctype, op) and any union
 * wrappers appear to be missing from this copy — verify upstream.
 */
/* Valid when op == WRITE/INIT and ctype == AURA.
 * LF fills the pool_id in aura.pool_addr. AF will translate
 * the pool_id to pool context pointer.
 */
struct npa_aura_s aura;
/* Valid when op == WRITE/INIT and ctype == POOL */
struct npa_pool_s pool;
/* Mask data when op == WRITE (1=write, 0=don't write) */
/* Valid when op == WRITE and ctype == AURA */
struct npa_aura_s aura_mask;
/* Valid when op == WRITE and ctype == POOL */
struct npa_pool_s pool_mask;
/* NOTE(review): closing brace of npa_aq_enq_req missing in this copy. */
struct npa_aq_enq_rsp {
struct mbox_msghdr hdr;
/* Valid when op == READ and ctype == AURA */
struct npa_aura_s aura;
/* Valid when op == READ and ctype == POOL */
struct npa_pool_s pool;
/* NOTE(review): closing brace of npa_aq_enq_rsp missing in this copy. */
/* Disable all contexts of type 'ctype' */
struct hwctx_disable_req {
struct mbox_msghdr hdr;
/* NOTE(review): the ctype member and closing brace appear to be missing
 * from this copy — verify against upstream.
 */
/* NIX mailbox error codes.
 * NOTE(review): the enum opener (e.g. "enum nix_af_status {") and the
 * closing "};" appear to be missing from this copy — verify upstream.
 */
NIX_AF_ERR_PARAM = -401,
NIX_AF_ERR_AQ_FULL = -402,
NIX_AF_ERR_AQ_ENQUEUE = -403,
NIX_AF_ERR_AF_LF_INVALID = -404,
NIX_AF_ERR_AF_LF_ALLOC = -405,
NIX_AF_ERR_TLX_ALLOC_FAIL = -406,
NIX_AF_ERR_TLX_INVALID = -407,
NIX_AF_ERR_RSS_SIZE_INVALID = -408,
NIX_AF_ERR_RSS_GRPS_INVALID = -409,
NIX_AF_ERR_FRS_INVALID = -410,
NIX_AF_ERR_RX_LINK_INVALID = -411,
NIX_AF_INVAL_TXSCHQ_CFG = -412,
NIX_AF_SMQ_FLUSH_FAILED = -413,
NIX_AF_ERR_LF_RESET = -414,
/* For NIX LF context alloc and init.
 * NOTE(review): several members and the closing braces of the structs
 * below appear to have been lost in this copy — verify upstream.
 */
struct nix_lf_alloc_req {
struct mbox_msghdr hdr;
u32 rq_cnt; /* No of receive queues */
u32 sq_cnt; /* No of send queues */
u32 cq_cnt; /* No of completion queues */
u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
/* NOTE(review): closing brace of nix_lf_alloc_req missing in this copy. */
struct nix_lf_alloc_rsp {
struct mbox_msghdr hdr;
u8 mac_addr[ETH_ALEN];
/* NOTE(review): closing brace of nix_lf_alloc_rsp missing in this copy. */
/* NIX AQ enqueue msg */
struct nix_aq_enq_req {
struct mbox_msghdr hdr;
/* NOTE(review): selector fields (qidx, ctype, op) and any union
 * wrappers appear to be missing from this copy — verify upstream.
 */
struct nix_rq_ctx_s rq;
struct nix_sq_ctx_s sq;
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
/* Mask variants, presumably valid for WRITE ops — TODO confirm. */
struct nix_rq_ctx_s rq_mask;
struct nix_sq_ctx_s sq_mask;
struct nix_cq_ctx_s cq_mask;
struct nix_rsse_s rss_mask;
struct nix_rx_mce_s mce_mask;
/* NOTE(review): closing brace of nix_aq_enq_req missing in this copy. */
struct nix_aq_enq_rsp {
struct mbox_msghdr hdr;
struct nix_rq_ctx_s rq;
struct nix_sq_ctx_s sq;
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
/* NOTE(review): closing brace of nix_aq_enq_rsp missing in this copy. */
/* Tx scheduler/shaper mailbox messages */

/* Upper bound on scheduler queues listed per level in one alloc rsp */
#define MAX_TXSCHQ_PER_FUNC 128
426 struct nix_txsch_alloc_req {
427 struct mbox_msghdr hdr;
428 /* Scheduler queue count request at each level */
429 u16 schq_contig[NIX_TXSCH_LVL_CNT]; /* No of contiguous queues */
430 u16 schq[NIX_TXSCH_LVL_CNT]; /* No of non-contiguous queues */
433 struct nix_txsch_alloc_rsp {
434 struct mbox_msghdr hdr;
435 /* Scheduler queue count allocated at each level */
436 u16 schq_contig[NIX_TXSCH_LVL_CNT];
437 u16 schq[NIX_TXSCH_LVL_CNT];
438 /* Scheduler queue list allocated at each level */
439 u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
440 u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
struct nix_txsch_free_req {
struct mbox_msghdr hdr;
#define TXSCHQ_FREE_ALL BIT_ULL(0)
/* Scheduler queue level to be freed */
/* List of scheduler queues to be freed */
/* NOTE(review): the flags/level/queue members and the closing brace of
 * this struct appear to be missing from this copy — verify upstream.
 */
struct nix_txschq_config {
struct mbox_msghdr hdr;
u8 lvl; /* SMQ/MDQ/TL4/TL3/TL2/TL1 */
#define TXSCHQ_IDX_SHIFT 16
#define TXSCHQ_IDX_MASK (BIT_ULL(10) - 1)
/* Extract a scheduler queue index field from a register offset */
#define TXSCHQ_IDX(reg, shift) (((reg) >> (shift)) & TXSCHQ_IDX_MASK)
#define MAX_REGS_PER_MBOX_MSG 20
u64 reg[MAX_REGS_PER_MBOX_MSG];
u64 regval[MAX_REGS_PER_MBOX_MSG];
/* NOTE(review): a register-count member and the closing brace appear to
 * be missing from this copy — verify against upstream.
 */