1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Huawei HiNIC PCI Express Linux driver
3 * Copyright(c) 2017 Huawei Technologies Co., Ltd
6 #include <linux/delay.h>
7 #include <linux/types.h>
8 #include <linux/completion.h>
9 #include <linux/semaphore.h>
10 #include <linux/spinlock.h>
11 #include <linux/workqueue.h>
13 #include "hinic_hw_if.h"
14 #include "hinic_hw_mgmt.h"
15 #include "hinic_hw_csr.h"
16 #include "hinic_hw_dev.h"
17 #include "hinic_hw_mbox.h"
19 #define HINIC_MBOX_INT_DST_FUNC_SHIFT 0
20 #define HINIC_MBOX_INT_DST_AEQN_SHIFT 10
21 #define HINIC_MBOX_INT_SRC_RESP_AEQN_SHIFT 12
22 #define HINIC_MBOX_INT_STAT_DMA_SHIFT 14
23 /* The size of data to be sended (unit of 4 bytes) */
24 #define HINIC_MBOX_INT_TX_SIZE_SHIFT 20
25 /* SO_RO(strong order, relax order) */
26 #define HINIC_MBOX_INT_STAT_DMA_SO_RO_SHIFT 25
27 #define HINIC_MBOX_INT_WB_EN_SHIFT 28
29 #define HINIC_MBOX_INT_DST_FUNC_MASK 0x3FF
30 #define HINIC_MBOX_INT_DST_AEQN_MASK 0x3
31 #define HINIC_MBOX_INT_SRC_RESP_AEQN_MASK 0x3
32 #define HINIC_MBOX_INT_STAT_DMA_MASK 0x3F
33 #define HINIC_MBOX_INT_TX_SIZE_MASK 0x1F
34 #define HINIC_MBOX_INT_STAT_DMA_SO_RO_MASK 0x3
35 #define HINIC_MBOX_INT_WB_EN_MASK 0x1
37 #define HINIC_MBOX_INT_SET(val, field) \
38 (((val) & HINIC_MBOX_INT_##field##_MASK) << \
39 HINIC_MBOX_INT_##field##_SHIFT)
41 enum hinic_mbox_tx_status {
45 #define HINIC_MBOX_CTRL_TRIGGER_AEQE_SHIFT 0
47 /* specifies the issue request for the message data.
48 * 0 - Tx request is done;
49 * 1 - Tx request is in process.
51 #define HINIC_MBOX_CTRL_TX_STATUS_SHIFT 1
53 #define HINIC_MBOX_CTRL_TRIGGER_AEQE_MASK 0x1
54 #define HINIC_MBOX_CTRL_TX_STATUS_MASK 0x1
56 #define HINIC_MBOX_CTRL_SET(val, field) \
57 (((val) & HINIC_MBOX_CTRL_##field##_MASK) << \
58 HINIC_MBOX_CTRL_##field##_SHIFT)
60 #define HINIC_MBOX_HEADER_MSG_LEN_SHIFT 0
61 #define HINIC_MBOX_HEADER_MODULE_SHIFT 11
62 #define HINIC_MBOX_HEADER_SEG_LEN_SHIFT 16
63 #define HINIC_MBOX_HEADER_NO_ACK_SHIFT 22
64 #define HINIC_MBOX_HEADER_SEQID_SHIFT 24
65 #define HINIC_MBOX_HEADER_LAST_SHIFT 30
67 /* specifies the mailbox message direction
71 #define HINIC_MBOX_HEADER_DIRECTION_SHIFT 31
72 #define HINIC_MBOX_HEADER_CMD_SHIFT 32
73 #define HINIC_MBOX_HEADER_MSG_ID_SHIFT 40
74 #define HINIC_MBOX_HEADER_STATUS_SHIFT 48
75 #define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_SHIFT 54
77 #define HINIC_MBOX_HEADER_MSG_LEN_MASK 0x7FF
78 #define HINIC_MBOX_HEADER_MODULE_MASK 0x1F
79 #define HINIC_MBOX_HEADER_SEG_LEN_MASK 0x3F
80 #define HINIC_MBOX_HEADER_NO_ACK_MASK 0x1
81 #define HINIC_MBOX_HEADER_SEQID_MASK 0x3F
82 #define HINIC_MBOX_HEADER_LAST_MASK 0x1
83 #define HINIC_MBOX_HEADER_DIRECTION_MASK 0x1
84 #define HINIC_MBOX_HEADER_CMD_MASK 0xFF
85 #define HINIC_MBOX_HEADER_MSG_ID_MASK 0xFF
86 #define HINIC_MBOX_HEADER_STATUS_MASK 0x3F
87 #define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_MASK 0x3FF
89 #define HINIC_MBOX_HEADER_GET(val, field) \
90 (((val) >> HINIC_MBOX_HEADER_##field##_SHIFT) & \
91 HINIC_MBOX_HEADER_##field##_MASK)
92 #define HINIC_MBOX_HEADER_SET(val, field) \
93 ((u64)((val) & HINIC_MBOX_HEADER_##field##_MASK) << \
94 HINIC_MBOX_HEADER_##field##_SHIFT)
96 #define MBOX_SEGLEN_MASK \
97 HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEG_LEN_MASK, SEG_LEN)
99 #define HINIC_MBOX_SEG_LEN 48
100 #define HINIC_MBOX_COMP_TIME 8000U
101 #define MBOX_MSG_POLLING_TIMEOUT 8000
103 #define HINIC_MBOX_DATA_SIZE 2040
105 #define MBOX_MAX_BUF_SZ 2048UL
106 #define MBOX_HEADER_SZ 8
108 #define MBOX_INFO_SZ 4
110 /* MBOX size is 64B, 8B for mbox_header, 4B reserved */
111 #define MBOX_SEG_LEN 48
112 #define MBOX_SEG_LEN_ALIGN 4
113 #define MBOX_WB_STATUS_LEN 16UL
115 /* mbox write back status is 16B, only first 4B is used */
116 #define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF
117 #define MBOX_WB_STATUS_MASK 0xFF
118 #define MBOX_WB_ERROR_CODE_MASK 0xFF00
119 #define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF
120 #define MBOX_WB_STATUS_FINISHED_WITH_ERR 0xFE
121 #define MBOX_WB_STATUS_NOT_FINISHED 0x00
123 #define MBOX_STATUS_FINISHED(wb) \
124 (((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED)
125 #define MBOX_STATUS_SUCCESS(wb) \
126 (((wb) & MBOX_WB_STATUS_MASK) == MBOX_WB_STATUS_FINISHED_SUCCESS)
127 #define MBOX_STATUS_ERRCODE(wb) \
128 ((wb) & MBOX_WB_ERROR_CODE_MASK)
130 #define SEQ_ID_START_VAL 0
131 #define SEQ_ID_MAX_VAL 42
133 #define DST_AEQ_IDX_DEFAULT_VAL 0
134 #define SRC_AEQ_IDX_DEFAULT_VAL 0
135 #define NO_DMA_ATTRIBUTE_VAL 0
137 #define HINIC_MGMT_RSP_AEQN 0
138 #define HINIC_MBOX_RSP_AEQN 2
139 #define HINIC_MBOX_RECV_AEQN 0
141 #define MBOX_MSG_NO_DATA_LEN 1
143 #define MBOX_BODY_FROM_HDR(header) ((u8 *)(header) + MBOX_HEADER_SZ)
144 #define MBOX_AREA(hwif) \
145 ((hwif)->cfg_regs_bar + HINIC_FUNC_CSR_MAILBOX_DATA_OFF)
147 #define IS_PF_OR_PPF_SRC(src_func_idx) ((src_func_idx) < HINIC_MAX_PF_FUNCS)
149 #define MBOX_RESPONSE_ERROR 0x1
150 #define MBOX_MSG_ID_MASK 0xFF
151 #define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id)
152 #define MBOX_MSG_ID_INC(func_to_func_mbox) (MBOX_MSG_ID(func_to_func_mbox) = \
153 (MBOX_MSG_ID(func_to_func_mbox) + 1) & MBOX_MSG_ID_MASK)
155 #define FUNC_ID_OFF_SET_8B 8
156 #define FUNC_ID_OFF_SET_10B 10
158 /* max message counter wait to process for one function */
159 #define HINIC_MAX_MSG_CNT_TO_PROCESS 10
161 #define HINIC_QUEUE_MIN_DEPTH 6
162 #define HINIC_QUEUE_MAX_DEPTH 12
163 #define HINIC_MAX_RX_BUFFER_SIZE 15
165 enum hinic_hwif_direction_type {
166 HINIC_HWIF_DIRECT_SEND = 0,
167 HINIC_HWIF_RESPONSE = 1,
179 enum mbox_ordering_type {
183 enum mbox_write_back_type {
187 enum mbox_aeq_trig_type {
193 * hinic_register_pf_mbox_cb - register mbox callback for pf
194 * @hwdev: the pointer to hw device
195 * @mod: specific mod that the callback will handle
196 * @callback: callback function
197 * Return: 0 - success, negative - failure
/* Stores @callback in the per-module PF mailbox callback table and marks it
 * registered via HINIC_PF_MBOX_CB_REG so the receive path may invoke it.
 * NOTE(review): this extract drops lines (non-contiguous internal numbering);
 * the @mod >= HINIC_MOD_MAX branch presumably returns an error -- confirm
 * against the full source.
 */
199 int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev,
200 enum hinic_mod_type mod,
201 hinic_pf_mbox_cb callback)
203 struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
205 if (mod >= HINIC_MOD_MAX)
208 func_to_func->pf_mbox_cb[mod] = callback;
/* publish only after the pointer is in place */
210 set_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);
216 * hinic_register_vf_mbox_cb - register mbox callback for vf
217 * @hwdev: the pointer to hw device
218 * @mod: specific mod that the callback will handle
219 * @callback: callback function
220 * Return: 0 - success, negative - failure
/* VF-side counterpart of hinic_register_pf_mbox_cb(): records @callback for
 * module @mod and sets HINIC_VF_MBOX_CB_REG.  Out-of-range @mod is rejected
 * (error-return line not visible in this extract).
 */
222 int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev,
223 enum hinic_mod_type mod,
224 hinic_vf_mbox_cb callback)
226 struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
228 if (mod >= HINIC_MOD_MAX)
231 func_to_func->vf_mbox_cb[mod] = callback;
233 set_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);
239 * hinic_unregister_pf_mbox_cb - unregister the mbox callback for pf
240 * @hwdev: the pointer to hw device
241 * @mod: specific mod that the callback will handle
/* Clears the registered bit first, then sleeps (~1 ms per iteration) until
 * no in-flight invocation holds HINIC_PF_MBOX_CB_RUNNING before NULLing the
 * callback pointer -- prevents clearing a callback while it is executing.
 */
243 void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
244 enum hinic_mod_type mod)
246 struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
248 clear_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);
250 while (test_bit(HINIC_PF_MBOX_CB_RUNNING,
251 &func_to_func->pf_mbox_cb_state[mod]))
252 usleep_range(900, 1000);
254 func_to_func->pf_mbox_cb[mod] = NULL;
258 * hinic_unregister_vf_mbox_cb - unregister the mbox callback for vf
259 * @hwdev: the pointer to hw device
260 * @mod: specific mod that the callback will handle
/* VF-side counterpart of hinic_unregister_pf_mbox_cb(): clear the REG bit,
 * wait for any running invocation to finish, then drop the pointer.
 */
262 void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
263 enum hinic_mod_type mod)
265 struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
267 clear_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);
269 while (test_bit(HINIC_VF_MBOX_CB_RUNNING,
270 &func_to_func->vf_mbox_cb_state[mod]))
271 usleep_range(900, 1000);
273 func_to_func->vf_mbox_cb[mod] = NULL;
/* Dispatches a fully reassembled mailbox message received by a VF to the
 * callback registered for recv_mbox->mod.  The RUNNING bit brackets the
 * invocation so hinic_unregister_vf_mbox_cb() cannot pull the callback out
 * from under us.  Return-value lines are not visible in this extract.
 */
276 static int recv_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
277 struct hinic_recv_mbox *recv_mbox,
278 void *buf_out, u16 *out_size)
283 if (recv_mbox->mod >= HINIC_MOD_MAX) {
284 dev_err(&func_to_func->hwif->pdev->dev, "Receive illegal mbox message, mod = %d\n",
289 set_bit(HINIC_VF_MBOX_CB_RUNNING,
290 &func_to_func->vf_mbox_cb_state[recv_mbox->mod]);
292 cb = func_to_func->vf_mbox_cb[recv_mbox->mod];
/* only call the callback if it is still registered */
293 if (cb && test_bit(HINIC_VF_MBOX_CB_REG,
294 &func_to_func->vf_mbox_cb_state[recv_mbox->mod])) {
295 cb(func_to_func->hwdev, recv_mbox->cmd, recv_mbox->mbox,
296 recv_mbox->mbox_len, buf_out, out_size);
298 dev_err(&func_to_func->hwif->pdev->dev, "VF mbox cb is not registered\n");
302 clear_bit(HINIC_VF_MBOX_CB_RUNNING,
303 &func_to_func->vf_mbox_cb_state[recv_mbox->mod]);
/* PF-side dispatch of a message received from a VF.  The VF's relative id is
 * derived by subtracting the global PF/VF function-id offset from the source
 * global function index.  Guarded by the PF RUNNING/REG bits like the VF path.
 */
309 recv_pf_from_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
310 struct hinic_recv_mbox *recv_mbox,
311 u16 src_func_idx, void *buf_out,
318 if (recv_mbox->mod >= HINIC_MOD_MAX) {
319 dev_err(&func_to_func->hwif->pdev->dev, "Receive illegal mbox message, mod = %d\n",
324 set_bit(HINIC_PF_MBOX_CB_RUNNING,
325 &func_to_func->pf_mbox_cb_state[recv_mbox->mod]);
327 cb = func_to_func->pf_mbox_cb[recv_mbox->mod];
328 if (cb && test_bit(HINIC_PF_MBOX_CB_REG,
329 &func_to_func->pf_mbox_cb_state[recv_mbox->mod])) {
/* convert global src function index into the VF number the callback expects */
330 vf_id = src_func_idx -
331 hinic_glb_pf_vf_offset(func_to_func->hwif);
332 ret = cb(func_to_func->hwdev, vf_id, recv_mbox->cmd,
333 recv_mbox->mbox, recv_mbox->mbox_len,
336 dev_err(&func_to_func->hwif->pdev->dev, "PF mbox mod(0x%x) cb is not registered\n",
341 clear_bit(HINIC_PF_MBOX_CB_RUNNING,
342 &func_to_func->pf_mbox_cb_state[recv_mbox->mod]);
/* Validates one incoming segment: seq id must be within SEQ_ID_MAX_VAL and
 * the segment must fit MBOX_SEG_LEN; a non-first segment must follow its
 * predecessor (seq_id == previous + 1).  On success the accepted seq id is
 * recorded in recv_mbox->seq_id.  NOTE(review): the first-segment branch and
 * return statements are dropped from this extract -- confirm full logic.
 */
347 static bool check_mbox_seq_id_and_seg_len(struct hinic_recv_mbox *recv_mbox,
348 u8 seq_id, u8 seg_len)
350 if (seq_id > SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN)
354 recv_mbox->seq_id = seq_id;
356 if (seq_id != recv_mbox->seq_id + 1)
359 recv_mbox->seq_id = seq_id;
/* Handles a response-direction message: under mbox_lock, complete the waiter
 * only if the response's msg id matches the id we last sent AND the exchange
 * is still in EVENT_START -- a late response (after timeout/abort) is only
 * logged, so it cannot complete a stale or reused completion.
 */
365 static void resp_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
366 struct hinic_recv_mbox *recv_mbox)
368 spin_lock(&func_to_func->mbox_lock);
369 if (recv_mbox->msg_info.msg_id == func_to_func->send_msg_id &&
370 func_to_func->event_flag == EVENT_START)
371 complete(&recv_mbox->recv_done);
373 dev_err(&func_to_func->hwif->pdev->dev,
374 "Mbox response timeout, current send msg id(0x%x), recv msg id(0x%x), status(0x%x)\n",
375 func_to_func->send_msg_id, recv_mbox->msg_info.msg_id,
376 recv_mbox->msg_info.status);
377 spin_unlock(&func_to_func->mbox_lock);
380 static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
381 struct hinic_recv_mbox *recv_mbox,
/* Workqueue worker: processes one deferred received mailbox message, then
 * decrements the per-source-function pending-message counter (msg_cnt) that
 * recv_mbox_handler() incremented when queuing the work item.
 */
384 static void recv_func_mbox_work_handler(struct work_struct *work)
386 struct hinic_mbox_work *mbox_work =
387 container_of(work, struct hinic_mbox_work, work);
388 struct hinic_recv_mbox *recv_mbox;
390 recv_func_mbox_handler(mbox_work->func_to_func, mbox_work->recv_mbox,
391 mbox_work->src_func_idx);
/* msg_cnt lives on the persistent per-function mbox_send slot, not the dup */
394 &mbox_work->func_to_func->mbox_send[mbox_work->src_func_idx];
396 atomic_dec(&recv_mbox->msg_cnt);
/* Core receive path, called from AEQ context for every mailbox segment.
 * Reassembles segments into recv_mbox->mbox at seq_id * MBOX_SEG_LEN; on a
 * bad sequence the reassembly state is reset (seq_id = SEQ_ID_MAX_VAL).
 * When the LAST segment arrives, header metadata (cmd/mod/len/ack/msg id/
 * status) is captured; responses complete the synchronous waiter inline,
 * while requests are duplicated (kmemdup of the descriptor and payload plus
 * a fresh buf_out) and handed to the workqueue so the callback runs in
 * process context.  msg_cnt caps per-function backlog at
 * HINIC_MAX_MSG_CNT_TO_PROCESS.
 */
401 static void recv_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
402 void *header, struct hinic_recv_mbox *recv_mbox)
404 void *mbox_body = MBOX_BODY_FROM_HDR(header);
405 struct hinic_recv_mbox *rcv_mbox_temp = NULL;
406 u64 mbox_header = *((u64 *)header);
407 struct hinic_mbox_work *mbox_work;
412 seq_id = HINIC_MBOX_HEADER_GET(mbox_header, SEQID);
413 seg_len = HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN);
414 src_func_idx = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
416 if (!check_mbox_seq_id_and_seg_len(recv_mbox, seq_id, seg_len)) {
417 dev_err(&func_to_func->hwif->pdev->dev,
418 "Mailbox sequence and segment check fail, src func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x\n",
419 src_func_idx, recv_mbox->seq_id, seq_id, seg_len);
/* reset reassembly so the next message starts from a clean state */
420 recv_mbox->seq_id = SEQ_ID_MAX_VAL;
424 pos = seq_id * MBOX_SEG_LEN;
425 memcpy((u8 *)recv_mbox->mbox + pos, mbox_body,
426 HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN));
428 if (!HINIC_MBOX_HEADER_GET(mbox_header, LAST))
431 recv_mbox->cmd = HINIC_MBOX_HEADER_GET(mbox_header, CMD);
432 recv_mbox->mod = HINIC_MBOX_HEADER_GET(mbox_header, MODULE);
433 recv_mbox->mbox_len = HINIC_MBOX_HEADER_GET(mbox_header, MSG_LEN);
434 recv_mbox->ack_type = HINIC_MBOX_HEADER_GET(mbox_header, NO_ACK);
435 recv_mbox->msg_info.msg_id = HINIC_MBOX_HEADER_GET(mbox_header, MSG_ID);
436 recv_mbox->msg_info.status = HINIC_MBOX_HEADER_GET(mbox_header, STATUS);
437 recv_mbox->seq_id = SEQ_ID_MAX_VAL;
439 if (HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION) ==
440 HINIC_HWIF_RESPONSE) {
441 resp_mbox_handler(func_to_func, recv_mbox);
/* throttle: refuse to queue more work if this function is backlogged */
445 if (atomic_read(&recv_mbox->msg_cnt) > HINIC_MAX_MSG_CNT_TO_PROCESS) {
446 dev_warn(&func_to_func->hwif->pdev->dev,
447 "This function(%u) have %d message wait to process,can't add to work queue\n",
448 src_func_idx, atomic_read(&recv_mbox->msg_cnt));
/* duplicate descriptor + payload so the AEQ slot can be reused immediately */
452 rcv_mbox_temp = kmemdup(recv_mbox, sizeof(*rcv_mbox_temp), GFP_KERNEL);
456 rcv_mbox_temp->mbox = kmemdup(recv_mbox->mbox, MBOX_MAX_BUF_SZ,
458 if (!rcv_mbox_temp->mbox)
459 goto err_alloc_rcv_mbox_msg;
461 rcv_mbox_temp->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
462 if (!rcv_mbox_temp->buf_out)
463 goto err_alloc_rcv_mbox_buf;
465 mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL);
467 goto err_alloc_mbox_work;
469 mbox_work->func_to_func = func_to_func;
470 mbox_work->recv_mbox = rcv_mbox_temp;
471 mbox_work->src_func_idx = src_func_idx;
473 atomic_inc(&recv_mbox->msg_cnt);
474 INIT_WORK(&mbox_work->work, recv_func_mbox_work_handler);
475 queue_work(func_to_func->workq, &mbox_work->work);
/* unwind allocations in reverse order on failure */
480 kfree(rcv_mbox_temp->buf_out);
482 err_alloc_rcv_mbox_buf:
483 kfree(rcv_mbox_temp->mbox);
485 err_alloc_rcv_mbox_msg:
486 kfree(rcv_mbox_temp);
/* AEQ callback for function-to-function mailbox events.  Validates the
 * source global function index, then routes the segment to the per-source
 * reassembly slot: mbox_send[] for direct-send messages, mbox_resp[] for
 * responses.
 */
489 void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
491 struct hinic_mbox_func_to_func *func_to_func;
492 u64 mbox_header = *((u64 *)header);
493 struct hinic_recv_mbox *recv_mbox;
496 func_to_func = ((struct hinic_hwdev *)handle)->func_to_func;
498 dir = HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION);
499 src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
/* bounds-check before indexing the per-function arrays */
501 if (src >= HINIC_MAX_FUNCTIONS) {
502 dev_err(&func_to_func->hwif->pdev->dev,
503 "Mailbox source function id:%u is invalid\n", (u32)src);
507 recv_mbox = (dir == HINIC_HWIF_DIRECT_SEND) ?
508 &func_to_func->mbox_send[src] :
509 &func_to_func->mbox_resp[src];
511 recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox);
/* AEQ callback for send-result events: wakes the sender waiting in
 * wait_for_mbox_seg_completion() (interrupt-driven completion path).
 */
514 void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size)
516 struct hinic_mbox_func_to_func *func_to_func;
517 struct hinic_send_mbox *send_mbox;
519 func_to_func = ((struct hinic_hwdev *)handle)->func_to_func;
520 send_mbox = &func_to_func->send_mbox;
522 complete(&send_mbox->send_done);
/* Resets the hardware write-back status word to "not finished" before a
 * new segment is transmitted.
 */
525 static void clear_mbox_status(struct hinic_send_mbox *mbox)
527 *mbox->wb_status = 0;
529 /* clear mailbox write back status */
/* Copies the 8-byte mailbox header into the device's mailbox MMIO area as
 * two raw 32-bit writes (__raw_writel: no byte-swap, no implied barrier).
 */
533 static void mbox_copy_header(struct hinic_hwdev *hwdev,
534 struct hinic_send_mbox *mbox, u64 *header)
536 u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32);
537 u32 *data = (u32 *)header;
539 for (i = 0; i < idx_max; i++)
540 __raw_writel(*(data + i), mbox->data + i * sizeof(u32));
/* Copies one segment's payload into the mailbox MMIO area just after the
 * header, in 32-bit raw writes.  If the segment length is not a multiple of
 * 4, the data is first staged into a zeroed stack bounce buffer so the tail
 * write never reads past the caller's buffer.
 */
543 static void mbox_copy_send_data(struct hinic_hwdev *hwdev,
544 struct hinic_send_mbox *mbox, void *seg,
547 u8 mbox_max_buf[MBOX_SEG_LEN] = {0};
548 u32 data_len, chk_sz = sizeof(u32);
552 /* The mbox message should be aligned in 4 bytes. */
553 if (seg_len % chk_sz) {
554 memcpy(mbox_max_buf, seg, seg_len);
555 data = (u32 *)mbox_max_buf;
559 idx_max = ALIGN(data_len, chk_sz) / chk_sz;
561 for (i = 0; i < idx_max; i++)
562 __raw_writel(*(data + i),
563 mbox->data + MBOX_HEADER_SZ + i * sizeof(u32));
/* Programs the mailbox interrupt-attribute register (destination function,
 * destination/response AEQ, DMA attribute, TX size in 4-byte units, ordering
 * and write-back enable) and then the control register to kick transmission.
 * The wmb() orders the attribute write before the control write.
 * NOTE(review): the branch choosing TRIGGER vs NOT_TRIGGER (presumably on
 * the poll flag) has its condition line dropped from this extract.
 */
566 static void write_mbox_msg_attr(struct hinic_mbox_func_to_func *func_to_func,
567 u16 dst_func, u16 dst_aeqn, u16 seg_len,
570 u16 rsp_aeq = (dst_aeqn == 0) ? 0 : HINIC_MBOX_RSP_AEQN;
571 u32 mbox_int, mbox_ctrl;
573 mbox_int = HINIC_MBOX_INT_SET(dst_func, DST_FUNC) |
574 HINIC_MBOX_INT_SET(dst_aeqn, DST_AEQN) |
575 HINIC_MBOX_INT_SET(rsp_aeq, SRC_RESP_AEQN) |
576 HINIC_MBOX_INT_SET(NO_DMA_ATTRIBUTE_VAL, STAT_DMA) |
577 HINIC_MBOX_INT_SET(ALIGN(MBOX_SEG_LEN + MBOX_HEADER_SZ +
578 MBOX_INFO_SZ, MBOX_SEG_LEN_ALIGN) >> 2,
580 HINIC_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) |
581 HINIC_MBOX_INT_SET(WRITE_BACK, WB_EN);
583 hinic_hwif_write_reg(func_to_func->hwif,
584 HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, mbox_int);
586 wmb(); /* writing the mbox int attributes */
587 mbox_ctrl = HINIC_MBOX_CTRL_SET(TX_NOT_DONE, TX_STATUS);
590 mbox_ctrl |= HINIC_MBOX_CTRL_SET(NOT_TRIGGER, TRIGGER_AEQE);
592 mbox_ctrl |= HINIC_MBOX_CTRL_SET(TRIGGER, TRIGGER_AEQE);
594 hinic_hwif_write_reg(func_to_func->hwif,
595 HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl);
/* Debug helper: logs the current mailbox control and interrupt-offset
 * register values on send failure.
 */
598 static void dump_mox_reg(struct hinic_hwdev *hwdev)
602 val = hinic_hwif_read_reg(hwdev->hwif,
603 HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF);
604 dev_err(&hwdev->hwif->pdev->dev, "Mailbox control reg: 0x%x\n", val);
606 val = hinic_hwif_read_reg(hwdev->hwif,
607 HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF);
608 dev_err(&hwdev->hwif->pdev->dev, "Mailbox interrupt offset: 0x%x\n",
/* Reads the DMA write-back word (device writes big-endian; only the first
 * 4 of 16 bytes are meaningful) and returns the low 16 bits: status byte
 * plus error-code byte.  rmb() orders the read before the caller's check.
 */
612 static u16 get_mbox_status(struct hinic_send_mbox *mbox)
614 /* write back is 16B, but only use first 4B */
615 u64 wb_val = be64_to_cpu(*mbox->wb_status);
617 rmb(); /* verify reading before check */
619 return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK);
/* Waits for the hardware to finish one segment.  In poll mode, spins up to
 * MBOX_MSG_POLLING_TIMEOUT iterations with ~1 ms sleeps, checking the
 * write-back status; otherwise blocks on the send_done completion (signalled
 * by hinic_mbox_self_aeqe_handler) for HINIC_MBOX_COMP_TIME ms and re-reads
 * the status afterwards.  Timeout-handling lines (dump/return) are partly
 * dropped from this extract.
 */
623 wait_for_mbox_seg_completion(struct hinic_mbox_func_to_func *func_to_func,
624 int poll, u16 *wb_status)
626 struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
627 struct hinic_hwdev *hwdev = func_to_func->hwdev;
628 struct completion *done = &send_mbox->send_done;
633 while (cnt < MBOX_MSG_POLLING_TIMEOUT) {
634 *wb_status = get_mbox_status(send_mbox);
635 if (MBOX_STATUS_FINISHED(*wb_status))
638 usleep_range(900, 1000);
642 if (cnt == MBOX_MSG_POLLING_TIMEOUT) {
643 dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout, wb status: 0x%x\n",
649 jif = msecs_to_jiffies(HINIC_MBOX_COMP_TIME);
650 if (!wait_for_completion_timeout(done, jif)) {
651 dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout\n");
656 *wb_status = get_mbox_status(send_mbox);
/* Transmits a single mailbox segment: picks the destination AEQ from the
 * header direction, clears the write-back status, copies header + payload
 * into the MMIO area, programs the attribute/control registers, and waits
 * for completion.  A non-success write-back status is converted into the
 * device's error code, or -EFAULT if the device reported failure without one.
 */
662 static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func,
663 u64 header, u16 dst_func, void *seg, u16 seg_len,
664 int poll, void *msg_info)
666 struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
667 u16 seq_dir = HINIC_MBOX_HEADER_GET(header, DIRECTION);
668 struct hinic_hwdev *hwdev = func_to_func->hwdev;
669 struct completion *done = &send_mbox->send_done;
670 u8 num_aeqs = hwdev->hwif->attr.num_aeqs;
671 u16 dst_aeqn, wb_status = 0, errcode;
674 dst_aeqn = (seq_dir == HINIC_HWIF_DIRECT_SEND) ?
675 HINIC_MBOX_RECV_AEQN : HINIC_MBOX_RSP_AEQN;
/* re-arm the completion for this segment before kicking hardware */
680 init_completion(done);
682 clear_mbox_status(send_mbox);
684 mbox_copy_header(hwdev, send_mbox, &header);
686 mbox_copy_send_data(hwdev, send_mbox, seg, seg_len);
688 write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len, poll);
690 wmb(); /* writing the mbox msg attributes */
692 if (wait_for_mbox_seg_completion(func_to_func, poll, &wb_status))
695 if (!MBOX_STATUS_SUCCESS(wb_status)) {
696 dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment to function %d error, wb status: 0x%x\n",
697 dst_func, wb_status);
698 errcode = MBOX_STATUS_ERRCODE(wb_status);
699 return errcode ? errcode : -EFAULT;
/* Sends a complete message to another function, serialised by msg_send_sem.
 * Builds the 64-bit header once, then loops sending HINIC_MBOX_SEG_LEN-byte
 * segments; for the final segment the SEG_LEN field is replaced with the
 * remaining length and the LAST bit is set, terminating the loop.  The SEQID
 * field is re-stamped for each segment.  NOTE(review): seq_id increment and
 * some loop bookkeeping lines are dropped from this extract.
 */
705 static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
706 enum hinic_mod_type mod, u16 cmd, void *msg,
707 u16 msg_len, u16 dst_func,
708 enum hinic_hwif_direction_type direction,
709 enum hinic_mbox_ack_type ack_type,
710 struct mbox_msg_info *msg_info)
712 struct hinic_hwdev *hwdev = func_to_func->hwdev;
713 u16 seg_len = MBOX_SEG_LEN;
714 u8 *msg_seg = (u8 *)msg;
/* one message at a time on the wire per function */
720 down(&func_to_func->msg_send_sem);
722 header = HINIC_MBOX_HEADER_SET(msg_len, MSG_LEN) |
723 HINIC_MBOX_HEADER_SET(mod, MODULE) |
724 HINIC_MBOX_HEADER_SET(seg_len, SEG_LEN) |
725 HINIC_MBOX_HEADER_SET(ack_type, NO_ACK) |
726 HINIC_MBOX_HEADER_SET(SEQ_ID_START_VAL, SEQID) |
727 HINIC_MBOX_HEADER_SET(NOT_LAST_SEG, LAST) |
728 HINIC_MBOX_HEADER_SET(direction, DIRECTION) |
729 HINIC_MBOX_HEADER_SET(cmd, CMD) |
730 /* The vf's offset to it's associated pf */
731 HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) |
732 HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) |
733 HINIC_MBOX_HEADER_SET(hinic_global_func_id_hw(hwdev->hwif),
736 while (!(HINIC_MBOX_HEADER_GET(header, LAST))) {
737 if (left <= HINIC_MBOX_SEG_LEN) {
/* final segment: shrink SEG_LEN to the remainder and flag LAST */
738 header &= ~MBOX_SEGLEN_MASK;
739 header |= HINIC_MBOX_HEADER_SET(left, SEG_LEN);
740 header |= HINIC_MBOX_HEADER_SET(LAST_SEG, LAST);
745 err = send_mbox_seg(func_to_func, header, dst_func, msg_seg,
746 seg_len, MBOX_SEND_MSG_INT, msg_info);
748 dev_err(&hwdev->hwif->pdev->dev, "Failed to send mbox seg, seq_id=0x%llx\n",
749 HINIC_MBOX_HEADER_GET(header, SEQID));
750 goto err_send_mbox_seg;
753 left -= HINIC_MBOX_SEG_LEN;
754 msg_seg += HINIC_MBOX_SEG_LEN;
757 header &= ~(HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEQID_MASK,
759 header |= HINIC_MBOX_HEADER_SET(seq_id, SEQID);
763 up(&func_to_func->msg_send_sem);
/* Sends the response for a processed request when the sender asked for an
 * ack.  The callback's error code is mapped onto the mailbox status field
 * (busy-active-fw and vf-cmd-error pass through; anything else becomes
 * PF_SEND_ERR -- the success/zero branch is not visible in this extract).
 * An empty or failed response still carries MBOX_MSG_NO_DATA_LEN bytes so
 * the peer always gets a reply.
 */
769 response_for_recv_func_mbox(struct hinic_mbox_func_to_func *func_to_func,
770 struct hinic_recv_mbox *recv_mbox, int err,
771 u16 out_size, u16 src_func_idx)
773 struct mbox_msg_info msg_info = {0};
775 if (recv_mbox->ack_type == MBOX_ACK) {
/* echo the request's msg id so the peer can match the response */
776 msg_info.msg_id = recv_mbox->msg_info.msg_id;
777 if (err == HINIC_MBOX_PF_BUSY_ACTIVE_FW)
778 msg_info.status = HINIC_MBOX_PF_BUSY_ACTIVE_FW;
779 else if (err == HINIC_MBOX_VF_CMD_ERROR)
780 msg_info.status = HINIC_MBOX_VF_CMD_ERROR;
782 msg_info.status = HINIC_MBOX_PF_SEND_ERR;
784 /* if no data needs to response, set out_size to 1 */
785 if (!out_size || err)
786 out_size = MBOX_MSG_NO_DATA_LEN;
788 send_mbox_to_func(func_to_func, recv_mbox->mod, recv_mbox->cmd,
789 recv_mbox->buf_out, out_size, src_func_idx,
790 HINIC_HWIF_RESPONSE, MBOX_ACK,
/* Process-context handler for one duplicated request: dispatches to the
 * VF or PF callback path (PF-to-PF messages are unsupported and only
 * warned about), sends the response, then frees the buffers that
 * recv_mbox_handler() duplicated for this work item.
 */
795 static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
796 struct hinic_recv_mbox *recv_mbox,
799 void *buf_out = recv_mbox->buf_out;
800 u16 out_size = MBOX_MAX_BUF_SZ;
803 if (HINIC_IS_VF(func_to_func->hwif)) {
804 err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out,
807 if (IS_PF_OR_PPF_SRC(src_func_idx))
808 dev_warn(&func_to_func->hwif->pdev->dev,
809 "Unsupported pf2pf mbox msg\n");
811 err = recv_pf_from_vf_mbox_handler(func_to_func,
817 response_for_recv_func_mbox(func_to_func, recv_mbox, err, out_size,
/* these were kmemdup'd/kzalloc'd per work item in recv_mbox_handler() */
819 kfree(recv_mbox->buf_out);
820 kfree(recv_mbox->mbox);
/* Updates the send-state machine (EVENT_START/END/FAIL/TIMEOUT) under
 * mbox_lock so resp_mbox_handler() sees a consistent flag/msg-id pair.
 */
824 static void set_mbox_to_func_event(struct hinic_mbox_func_to_func *func_to_func,
825 enum mbox_event_state event_flag)
827 spin_lock(&func_to_func->mbox_lock);
828 func_to_func->event_flag = event_flag;
829 spin_unlock(&func_to_func->mbox_lock);
/* Extracts the result of a completed exchange: a non-zero status in the
 * response header is the error (busy-active-fw is expected and not logged);
 * otherwise the response payload is copied into @buf_out after verifying it
 * fits the caller's *out_size, and *out_size is updated to the actual length.
 */
832 static int mbox_resp_info_handler(struct hinic_mbox_func_to_func *func_to_func,
833 struct hinic_recv_mbox *mbox_for_resp,
834 enum hinic_mod_type mod, u16 cmd,
835 void *buf_out, u16 *out_size)
839 if (mbox_for_resp->msg_info.status) {
840 err = mbox_for_resp->msg_info.status;
841 if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
842 dev_err(&func_to_func->hwif->pdev->dev, "Mbox response error(0x%x)\n",
843 mbox_for_resp->msg_info.status);
847 if (buf_out && out_size) {
848 if (*out_size < mbox_for_resp->mbox_len) {
849 dev_err(&func_to_func->hwif->pdev->dev,
850 "Invalid response mbox message length: %d for mod %d cmd %d, should less than: %d\n",
851 mbox_for_resp->mbox_len, mod, cmd, *out_size);
855 if (mbox_for_resp->mbox_len)
856 memcpy(buf_out, mbox_for_resp->mbox,
857 mbox_for_resp->mbox_len);
859 *out_size = mbox_for_resp->mbox_len;
/* Synchronous request/response to another function.  Serialised by
 * mbox_send_sem; allocates a fresh msg id, marks the exchange EVENT_START,
 * sends the request, and waits on the per-destination recv_done completion
 * for @timeout ms (HINIC_MBOX_COMP_TIME if 0).  Failure/timeout transitions
 * the state machine so a late response is ignored by resp_mbox_handler().
 */
865 int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
866 enum hinic_mod_type mod, u16 cmd, u16 dst_func,
867 void *buf_in, u16 in_size, void *buf_out,
868 u16 *out_size, u32 timeout)
870 struct hinic_recv_mbox *mbox_for_resp;
871 struct mbox_msg_info msg_info = {0};
875 mbox_for_resp = &func_to_func->mbox_resp[dst_func];
877 down(&func_to_func->mbox_send_sem);
879 init_completion(&mbox_for_resp->recv_done);
/* increments send_msg_id (mod 0xFF) and uses the new value */
881 msg_info.msg_id = MBOX_MSG_ID_INC(func_to_func);
883 set_mbox_to_func_event(func_to_func, EVENT_START);
885 err = send_mbox_to_func(func_to_func, mod, cmd, buf_in, in_size,
886 dst_func, HINIC_HWIF_DIRECT_SEND, MBOX_ACK,
889 dev_err(&func_to_func->hwif->pdev->dev, "Send mailbox failed, msg_id: %d\n",
891 set_mbox_to_func_event(func_to_func, EVENT_FAIL);
895 timeo = msecs_to_jiffies(timeout ? timeout : HINIC_MBOX_COMP_TIME);
896 if (!wait_for_completion_timeout(&mbox_for_resp->recv_done, timeo)) {
897 set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT);
898 dev_err(&func_to_func->hwif->pdev->dev,
899 "Send mbox msg timeout, msg_id: %d\n", msg_info.msg_id);
904 set_mbox_to_func_event(func_to_func, EVENT_END);
906 err = mbox_resp_info_handler(func_to_func, mbox_for_resp, mod, cmd,
910 up(&func_to_func->mbox_send_sem);
/* Parameter check shared by hinic_mbox_to_pf/vf: rejects payloads larger
 * than HINIC_MBOX_DATA_SIZE.  (The success-return and any buf_in check are
 * not visible in this extract.)
 */
915 static int mbox_func_params_valid(struct hinic_mbox_func_to_func *func_to_func,
916 void *buf_in, u16 in_size)
918 if (in_size > HINIC_MBOX_DATA_SIZE) {
919 dev_err(&func_to_func->hwif->pdev->dev,
920 "Mbox msg len(%d) exceed limit(%d)\n",
921 in_size, HINIC_MBOX_DATA_SIZE);
/* VF-only entry point: validates the payload, refuses to run on a PF, and
 * forwards the request to this VF's associated PF (hinic_pf_id_of_vf_hw).
 */
928 int hinic_mbox_to_pf(struct hinic_hwdev *hwdev,
929 enum hinic_mod_type mod, u8 cmd, void *buf_in,
930 u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
932 struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
933 int err = mbox_func_params_valid(func_to_func, buf_in, in_size);
/* only a VF may send "to PF"; a PF calling this is a programming error */
938 if (!HINIC_IS_VF(hwdev->hwif)) {
939 dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n",
940 HINIC_FUNC_TYPE(hwdev->hwif));
944 return hinic_mbox_to_func(func_to_func, mod, cmd,
945 hinic_pf_id_of_vf_hw(hwdev->hwif), buf_in,
946 in_size, buf_out, out_size, timeout);
/* PF-only entry point: validates the payload and @vf_id (range-check line
 * not visible in this extract), then sends to the VF whose global function
 * id is the PF/VF offset plus @vf_id.
 */
949 int hinic_mbox_to_vf(struct hinic_hwdev *hwdev,
950 enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
951 u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
953 struct hinic_mbox_func_to_func *func_to_func;
960 func_to_func = hwdev->func_to_func;
961 err = mbox_func_params_valid(func_to_func, buf_in, in_size);
965 if (HINIC_IS_VF(hwdev->hwif)) {
966 dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n",
967 HINIC_FUNC_TYPE(hwdev->hwif));
972 dev_err(&hwdev->hwif->pdev->dev,
973 "VF id(%d) error!\n", vf_id);
977 /* vf_offset_to_pf + vf_id is the vf's global function id of vf in
980 dst_func_idx = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
982 return hinic_mbox_to_func(func_to_func, mod, cmd, dst_func_idx, buf_in,
983 in_size, buf_out, out_size, timeout);
/* Initialises one per-function receive slot: resets the reassembly seq id,
 * allocates the MBOX_MAX_BUF_SZ payload and response buffers, and zeroes
 * the pending-message counter.  On buf_out failure the mbox buffer is freed
 * via the error label.
 */
986 static int init_mbox_info(struct hinic_recv_mbox *mbox_info)
990 mbox_info->seq_id = SEQ_ID_MAX_VAL;
992 mbox_info->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
993 if (!mbox_info->mbox)
996 mbox_info->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
997 if (!mbox_info->buf_out) {
999 goto err_alloc_buf_out;
1002 atomic_set(&mbox_info->msg_cnt, 0);
1007 kfree(mbox_info->mbox);
/* Frees the two buffers allocated by init_mbox_info(). */
1012 static void clean_mbox_info(struct hinic_recv_mbox *mbox_info)
1014 kfree(mbox_info->buf_out);
1015 kfree(mbox_info->mbox);
/* Initialises one receive slot per possible source function
 * (HINIC_MAX_FUNCTIONS); on failure, unwinds the slots already initialised.
 */
1018 static int alloc_mbox_info(struct hinic_hwdev *hwdev,
1019 struct hinic_recv_mbox *mbox_info)
1024 for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++) {
1025 err = init_mbox_info(&mbox_info[func_idx]);
1027 dev_err(&hwdev->hwif->pdev->dev, "Failed to init function %d mbox info\n",
1029 goto err_init_mbox_info;
/* unwind: free only the slots successfully initialised before the failure */
1036 for (i = 0; i < func_idx; i++)
1037 clean_mbox_info(&mbox_info[i]);
/* Frees all HINIC_MAX_FUNCTIONS receive slots allocated by alloc_mbox_info(). */
1042 static void free_mbox_info(struct hinic_recv_mbox *mbox_info)
1046 for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++)
1047 clean_mbox_info(&mbox_info[func_idx])
/* Points the send mailbox at its MMIO data window inside the function's
 * config-register BAR (MBOX_AREA macro).
 */
1050 static void prepare_send_mbox(struct hinic_mbox_func_to_func *func_to_func)
1052 struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
1054 send_mbox->data = MBOX_AREA(func_to_func->hwif);
/* Allocates the DMA-coherent write-back status buffer (MBOX_WB_STATUS_LEN,
 * of which only the first 4 bytes are used by hardware) and programs its
 * bus address into the mailbox RESULT_H/RESULT_L CSRs so the device can
 * report per-segment completion status.
 */
1057 static int alloc_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
1059 struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
1060 struct hinic_hwdev *hwdev = func_to_func->hwdev;
1063 send_mbox->wb_vaddr = dma_alloc_coherent(&hwdev->hwif->pdev->dev,
1065 &send_mbox->wb_paddr,
1067 if (!send_mbox->wb_vaddr)
1070 send_mbox->wb_status = send_mbox->wb_vaddr;
1072 addr_h = upper_32_bits(send_mbox->wb_paddr);
1073 addr_l = lower_32_bits(send_mbox->wb_paddr);
1075 hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
1077 hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
/* Tears down the write-back status buffer: clears the RESULT_H/RESULT_L
 * CSRs (the value written is on lines not visible in this extract --
 * presumably 0) before freeing the DMA-coherent memory, so the device
 * cannot write into freed memory.
 */
1083 static void free_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
1085 struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
1086 struct hinic_hwdev *hwdev = func_to_func->hwdev;
1088 hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
1090 hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
1093 dma_free_coherent(&hwdev->hwif->pdev->dev, MBOX_WB_STATUS_LEN,
1094 send_mbox->wb_vaddr,
1095 send_mbox->wb_paddr);
/* PF mailbox callback for HINIC_MOD_COMM commands arriving from VFs.
 * HINIC_COMM_CMD_START_FLR is special-cased (its branch body is not visible
 * in this extract); all other commands are forwarded synchronously to the
 * management CPU.  A busy-active-fw error from management is expected and
 * not logged.
 */
1098 static int comm_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
1099 u16 in_size, void *buf_out, u16 *out_size)
1101 struct hinic_hwdev *hwdev = handle;
1102 struct hinic_pfhwdev *pfhwdev;
1105 pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
1107 if (cmd == HINIC_COMM_CMD_START_FLR) {
1110 err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
1111 cmd, buf_in, in_size, buf_out, out_size,
1112 HINIC_MGMT_MSG_SYNC);
1113 if (err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
1114 dev_err(&hwdev->hwif->pdev->dev,
1115 "PF mbox common callback handler err: %d\n",
/* Creates the function-to-function mailbox subsystem: allocates the context,
 * initialises the send/receive semaphores, lock, and single-threaded
 * workqueue, allocates the per-function send/response receive slots and the
 * DMA write-back status buffer, maps the send MMIO window, registers the two
 * AEQ callbacks, and (on PF only) installs the COMM-module PF handler.
 * Error paths unwind in strict reverse order of construction.
 */
1122 int hinic_func_to_func_init(struct hinic_hwdev *hwdev)
1124 struct hinic_mbox_func_to_func *func_to_func;
1125 struct hinic_pfhwdev *pfhwdev;
1128 pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
1129 func_to_func = kzalloc(sizeof(*func_to_func), GFP_KERNEL);
1133 hwdev->func_to_func = func_to_func;
1134 func_to_func->hwdev = hwdev;
1135 func_to_func->hwif = hwdev->hwif;
1136 sema_init(&func_to_func->mbox_send_sem, 1);
1137 sema_init(&func_to_func->msg_send_sem, 1);
1138 spin_lock_init(&func_to_func->mbox_lock);
/* single-threaded: received messages for a function are processed in order */
1139 func_to_func->workq = create_singlethread_workqueue(HINIC_MBOX_WQ_NAME);
1140 if (!func_to_func->workq) {
1141 dev_err(&hwdev->hwif->pdev->dev, "Failed to initialize MBOX workqueue\n");
1143 goto err_create_mbox_workq;
1146 err = alloc_mbox_info(hwdev, func_to_func->mbox_send);
1148 dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_active\n");
1149 goto err_alloc_mbox_for_send;
1152 err = alloc_mbox_info(hwdev, func_to_func->mbox_resp);
1154 dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_passive\n");
1155 goto err_alloc_mbox_for_resp;
1158 err = alloc_mbox_wb_status(func_to_func);
1160 dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mbox write back status\n");
1161 goto err_alloc_wb_status;
1164 prepare_send_mbox(func_to_func);
1166 hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC,
1167 &pfhwdev->hwdev, hinic_mbox_func_aeqe_handler);
1168 hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT,
1169 &pfhwdev->hwdev, hinic_mbox_self_aeqe_handler);
1171 if (!HINIC_IS_VF(hwdev->hwif))
1172 hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_COMM,
1173 comm_pf_mbox_handler);
1177 err_alloc_wb_status:
1178 free_mbox_info(func_to_func->mbox_resp);
1180 err_alloc_mbox_for_resp:
1181 free_mbox_info(func_to_func->mbox_send);
1183 err_alloc_mbox_for_send:
1184 destroy_workqueue(func_to_func->workq);
1186 err_create_mbox_workq:
1187 kfree(func_to_func);
/* Tears down the mailbox subsystem in reverse order of init: unregister the
 * AEQ and COMM callbacks first (no new events), flush and destroy the
 * workqueue before freeing the buffers its work items reference, then
 * release the write-back buffer, the receive slots, and the context itself.
 */
1192 void hinic_func_to_func_free(struct hinic_hwdev *hwdev)
1194 struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
1196 hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC);
1197 hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT);
1199 hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_COMM);
1200 /* destroy workqueue before free related mbox resources in case of
1201 * illegal resource access
1203 destroy_workqueue(func_to_func->workq);
1205 free_mbox_wb_status(func_to_func);
1206 free_mbox_info(func_to_func->mbox_resp);
1207 free_mbox_info(func_to_func->mbox_send);
1209 kfree(func_to_func);