/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/of_platform.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v1.h"
/**
 * hns_get_gid_index - Get gid index.
 * @hr_dev: pointer to structure hns_roce_dev.
 * @port: port, value range: 0 ~ MAX
 * @gid_index: gid_index, value range: 0 ~ MAX
 *
 * N ports share the GID table; entries are allocated round-robin:
 *	GID[0][0], GID[1][0], ..... GID[N - 1][0],
 *	GID[0][1], GID[1][1], ..... GID[N - 1][1]
 */
u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index)
{
	return gid_index * hr_dev->caps.num_ports + port;
}
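
/*
 * Worked example (editor's illustration, not in the original source): with
 * num_ports == 2, gid_index 0 occupies table entries 0 (port 0) and
 * 1 (port 1), while gid_index 1 occupies entries 2 and 3; in general,
 * entry = gid_index * num_ports + port.
 */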
static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);
	dseg->addr = cpu_to_le64(sg->addr);
	dseg->len = cpu_to_le32(sg->length);
}

static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
			  u32 rkey)
{
	rseg->raddr = cpu_to_le64(remote_addr);
	rseg->rkey = cpu_to_le32(rkey);
}
static int hns_roce_v1_post_send(struct ib_qp *ibqp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL;
	struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
	struct hns_roce_wqe_data_seg *dseg = NULL;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_sq_db sq_db = {};
	__le32 doorbell[2];
	unsigned long flags = 0;
	unsigned int wqe_idx;
	void *wqe = NULL;
	int ps_opcode;
	int loopback;
	u8 *smac;
	int ret = 0;
	int nreq;
	int i;

	if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_RC)) {
		dev_err(dev, "unsupported QP type\n");
		*bad_wr = wr;
		return -EOPNOTSUPP;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
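
		/*
		 * Editor's note (assumption: wqe_cnt is a power of two, as
		 * enforced at QP creation): "& (wqe_cnt - 1)" is the cheap
		 * form of "% wqe_cnt"; e.g. head 65 with wqe_cnt 64 maps to
		 * slot 1.
		 */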
		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = hns_roce_get_send_wqe(qp, wqe_idx);
		qp->sq.wrid[wqe_idx] = wr->wr_id;

		/* Build UD (for GSI) and RC WQEs separately */
		if (ibqp->qp_type == IB_QPT_GSI) {
			ud_sq_wqe = wqe;
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_0_M,
				       UD_SEND_WQE_U32_4_DMAC_0_S,
				       ah->av.mac[0]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_1_M,
				       UD_SEND_WQE_U32_4_DMAC_1_S,
				       ah->av.mac[1]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_2_M,
				       UD_SEND_WQE_U32_4_DMAC_2_S,
				       ah->av.mac[2]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_3_M,
				       UD_SEND_WQE_U32_4_DMAC_3_S,
				       ah->av.mac[3]);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_4_M,
				       UD_SEND_WQE_U32_8_DMAC_4_S,
				       ah->av.mac[4]);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_5_M,
				       UD_SEND_WQE_U32_8_DMAC_5_S,
				       ah->av.mac[5]);

			smac = (u8 *)hr_dev->dev_addr[qp->port];
			loopback = ether_addr_equal_unaligned(ah->av.mac,
							      smac) ? 1 : 0;
			roce_set_bit(ud_sq_wqe->u32_8,
				     UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
				     loopback);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
				       HNS_ROCE_WQE_OPCODE_SEND);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S,
				       2);
			roce_set_bit(ud_sq_wqe->u32_8,
				     UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S,
				     1);

			ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ?
				cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				(wr->send_flags & IB_SEND_SOLICITED ?
				cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				((wr->opcode == IB_WR_SEND_WITH_IMM) ?
				cpu_to_le32(HNS_ROCE_WQE_IMM) : 0);
			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_DEST_QP_M,
				       UD_SEND_WQE_U32_16_DEST_QP_S,
				       ud_wr(wr)->remote_qpn);
			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S,
				       ah->av.stat_rate);

			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_M,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_S,
				       ah->av.flowlabel);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_PRIORITY_M,
				       UD_SEND_WQE_U32_36_PRIORITY_S,
				       ah->av.sl);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_SGID_INDEX_M,
				       UD_SEND_WQE_U32_36_SGID_INDEX_S,
				       hns_get_gid_index(hr_dev, qp->phy_port,
							 ah->av.gid_index));

			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_M,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_S,
				       ah->av.hop_limit);
			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S,
				       ah->av.tclass);

			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);

			ud_sq_wqe->va0_l =
				       cpu_to_le32((u32)wr->sg_list[0].addr);
			ud_sq_wqe->va0_h =
				       cpu_to_le32((wr->sg_list[0].addr) >> 32);
			ud_sq_wqe->l_key0 =
				       cpu_to_le32(wr->sg_list[0].lkey);

			ud_sq_wqe->va1_l =
				       cpu_to_le32((u32)wr->sg_list[1].addr);
			ud_sq_wqe->va1_h =
				       cpu_to_le32((wr->sg_list[1].addr) >> 32);
			ud_sq_wqe->l_key1 =
				       cpu_to_le32(wr->sg_list[1].lkey);
		} else if (ibqp->qp_type == IB_QPT_RC) {
			u32 tmp_len = 0;

			ctrl = wqe;
			memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
			for (i = 0; i < wr->num_sge; i++)
				tmp_len += wr->sg_list[i].length;

			ctrl->msg_length =
			  cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len);
			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ctrl->imm_data = wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				ctrl->inv_key =
					cpu_to_le32(wr->ex.invalidate_rkey);
				break;
			default:
				ctrl->imm_data = 0;
				break;
			}

			/* Set ctrl flags: signaled, solicited, imm, fence */
			/* (the SO bit awaits a conforming use case) */
			ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
				      cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				      (wr->send_flags & IB_SEND_SOLICITED ?
				      cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				      ((wr->opcode == IB_WR_SEND_WITH_IMM ||
				      wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ?
				      cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) |
				      (wr->send_flags & IB_SEND_FENCE ?
				      (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);
			wqe += sizeof(struct hns_roce_wqe_ctrl_seg);

			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_SEND:
			case IB_WR_SEND_WITH_INV:
			case IB_WR_SEND_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
				break;
			case IB_WR_LOCAL_INV:
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			default:
				ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
				break;
			}
			ctrl->flag |= cpu_to_le32(ps_opcode);
			wqe += sizeof(struct hns_roce_wqe_raddr_seg);
			dseg = wqe;
			if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
				if (le32_to_cpu(ctrl->msg_length) >
				    hr_dev->caps.max_sq_inline) {
					ret = -EINVAL;
					*bad_wr = wr;
					dev_err(dev, "inline len(1-%d)=%d, illegal",
						hr_dev->caps.max_sq_inline,
						le32_to_cpu(ctrl->msg_length));
					goto out;
				}
				for (i = 0; i < wr->num_sge; i++) {
					memcpy(wqe, ((void *) (uintptr_t)
					       wr->sg_list[i].addr),
					       wr->sg_list[i].length);
					wqe += wr->sg_list[i].length;
				}
				ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE);
			} else {
				for (i = 0; i < wr->num_sge; i++)
					set_data_seg(dseg + i, wr->sg_list + i);

				ctrl->flag |= cpu_to_le32(wr->num_sge <<
					      HNS_ROCE_WQE_SGE_NUM_BIT);
			}
		}
	}
out:
	if (likely(nreq)) {
		qp->sq.head += nreq;
		/* Memory barrier */
		wmb();

		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
			       SQ_DOORBELL_U32_4_SQ_HEAD_S,
			       (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
			       SQ_DOORBELL_U32_4_SL_S, qp->sl);
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
			       SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
		roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
			       SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
		roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);
		doorbell[0] = sq_db.u32_4;
		doorbell[1] = sq_db.u32_8;

		hns_roce_write64_k(doorbell, qp->sq.db_reg);
	}
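
	/*
	 * Editor's note (assumption based on the surrounding code): the two
	 * 32-bit doorbell words are pushed with a single 64-bit store so the
	 * SQ head, SL, port, QPN and the HW_SYNC bit become visible to the
	 * hardware together.
	 */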
	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}
static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
				 const struct ib_recv_wr *wr,
				 const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
	struct hns_roce_wqe_data_seg *scat = NULL;
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_rq_db rq_db = {};
	__le32 doorbell[2] = {0};
	unsigned long flags = 0;
	unsigned int wqe_idx;
	int ret = 0;
	int nreq;
	int i;

	spin_lock_irqsave(&hr_qp->rq.lock, flags);
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
					 hr_qp->ibqp.recv_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);

		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			dev_err(dev, "rq:num_sge=%d > hr_qp->rq.max_gs=%d\n",
				wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}
396 roce_set_field(ctrl->rwqe_byte_12,
397 RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
398 RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S,
401 scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);
403 for (i = 0; i < wr->num_sge; i++)
404 set_data_seg(scat + i, wr->sg_list + i);
406 hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;

		if (ibqp->qp_type == IB_QPT_GSI) {
			__le32 tmp;
			u32 reg_val;

			/* SW updates the GSI rq header */
			reg_val = roce_read(to_hr_dev(ibqp->device),
					    ROCEE_QP1C_CFG3_0_REG +
					    QP1C_CFGN_OFFSET * hr_qp->phy_port);
			tmp = cpu_to_le32(reg_val);
			roce_set_field(tmp,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
				       hr_qp->rq.head);
			reg_val = le32_to_cpu(tmp);
			roce_write(to_hr_dev(ibqp->device),
				   ROCEE_QP1C_CFG3_0_REG +
				   QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
		} else {
			roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
				       RQ_DOORBELL_U32_4_RQ_HEAD_S,
				       hr_qp->rq.head);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
				       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
				       RQ_DOORBELL_U32_8_CMD_S, 1);
			roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
				     1);

			doorbell[0] = rq_db.u32_4;
			doorbell[1] = rq_db.u32_8;

			hns_roce_write64_k(doorbell, hr_qp->rq.db_reg);
		}
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}
static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
				       int sdb_mode, int odb_mode)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
	roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}
static int hns_roce_v1_set_hem(struct hns_roce_dev *hr_dev,
			       struct hns_roce_hem_table *table, int obj,
			       u32 step_idx)
{
	spinlock_t *lock = &hr_dev->bt_cmd_lock;
	struct device *dev = hr_dev->dev;
	struct hns_roce_hem_iter iter;
	void __iomem *bt_cmd;
	__le32 bt_cmd_val[2];
	__le32 bt_cmd_h = 0;
	unsigned long flags;
	__le32 bt_cmd_l;
	int ret = 0;
	u64 bt_ba;
	long end;

	/* Find the HEM (Hardware Entry Memory) entry */
	unsigned long i = obj / (table->table_chunk_size / table->obj_size);

	switch (table->type) {
	case HEM_TYPE_QPC:
	case HEM_TYPE_MTPT:
	case HEM_TYPE_CQC:
	case HEM_TYPE_SRQC:
		roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
		break;
	default:
		return ret;
	}

	roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
		ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
	roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
	roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);

	/* Currently iterate over only one chunk */
	for (hns_roce_hem_first(table->hem[i], &iter);
	     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
		bt_ba = hns_roce_hem_addr(&iter) >> HNS_HW_PAGE_SHIFT;

		spin_lock_irqsave(lock, flags);

		bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;

		end = HW_SYNC_TIMEOUT_MSECS;
		while (end > 0) {
			if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT))
				break;

			mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
			end -= HW_SYNC_SLEEP_TIME_INTERVAL;
		}

		if (end <= 0) {
			dev_err(dev, "Write bt_cmd err, hw_sync is not zero.\n");
			spin_unlock_irqrestore(lock, flags);
			return -EBUSY;
		}

		bt_cmd_l = cpu_to_le32(bt_ba);
		roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
			ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S,
			upper_32_bits(bt_ba));

		bt_cmd_val[0] = bt_cmd_l;
		bt_cmd_val[1] = bt_cmd_h;
		hns_roce_write64_k(bt_cmd_val,
				   hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
		spin_unlock_irqrestore(lock, flags);
	}

	return ret;
}
static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
				     u32 odb_mode)
{
	__le32 tmp;
	u32 val;

	/* Configure SDB/ODB extend mode */
	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
	roce_set_bit(tmp, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}
static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
			     u32 sdb_alful)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
	roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
}
static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
			     u32 odb_alful)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
	roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
}
static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
				 u32 ext_sdb_alful)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_db_table *db = &priv->db_table;
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t sdb_dma_addr;
	__le32 tmp;
	u32 val;

	/* Configure extend SDB threshold */
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);

	/* Configure extend SDB base addr */
	sdb_dma_addr = db->ext_db->sdb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12));

	/* Configure extend SDB depth */
	val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
		       db->ext_db->esdb_dep);
	/*
	 * 44 = 32 + 12. Addresses handed to the hardware are shifted right
	 * by 12 (4 KB pages); shifting by a further 32 extracts the high
	 * 32 bits of that page-shifted value.
	 */
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);
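
	/*
	 * Worked example (editor's illustration): for a DMA address of
	 * 0x1234_5678_9000, the low register receives (u32)(addr >> 12) ==
	 * 0x23456789 and BA_H receives addr >> 44 == 0x1, i.e. the bits
	 * above the 44-bit boundary.
	 */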
	dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
	dev_dbg(dev, "ext SDB threshold: empty: 0x%x, ful: 0x%x\n",
		ext_sdb_alept, ext_sdb_alful);
}
static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
				 u32 ext_odb_alful)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_db_table *db = &priv->db_table;
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t odb_dma_addr;
	__le32 tmp;
	u32 val;

	/* Configure extend ODB threshold */
	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);

	/* Configure extend ODB base addr */
	odb_dma_addr = db->ext_db->odb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12));

	/* Configure extend ODB depth */
	val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
		       ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
		       db->ext_db->eodb_dep);
	/* Base address high bits, analogous to the SDB path above */
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
		       odb_dma_addr >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);

	dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
	dev_dbg(dev, "ext ODB threshold: empty: 0x%x, ful: 0x%x\n",
		ext_odb_alept, ext_odb_alful);
}
static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
				u32 odb_ext_mod)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_db_table *db = &priv->db_table;
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t sdb_dma_addr;
	dma_addr_t odb_dma_addr;
	int ret = 0;

	db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
	if (!db->ext_db)
		return -ENOMEM;

	if (sdb_ext_mod) {
		db->ext_db->sdb_buf_list = kmalloc(
				sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list) {
			ret = -ENOMEM;
			goto ext_sdb_buf_fail_out;
		}

		db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev,
						     HNS_ROCE_V1_EXT_SDB_SIZE,
						     &sdb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_sq_db_buf_fail;
		}
		db->ext_db->sdb_buf_list->map = sdb_dma_addr;

		db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH);
		hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT,
				     HNS_ROCE_V1_EXT_SDB_ALFUL);
	} else {
		hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT,
				 HNS_ROCE_V1_SDB_ALFUL);
	}

	if (odb_ext_mod) {
		db->ext_db->odb_buf_list = kmalloc(
				sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL);
		if (!db->ext_db->odb_buf_list) {
			ret = -ENOMEM;
			goto ext_odb_buf_fail_out;
		}

		db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev,
						     HNS_ROCE_V1_EXT_ODB_SIZE,
						     &odb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->odb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_otr_db_buf_fail;
		}
		db->ext_db->odb_buf_list->map = odb_dma_addr;

		db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH);
		hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT,
				     HNS_ROCE_V1_EXT_ODB_ALFUL);
	} else {
		hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT,
				 HNS_ROCE_V1_ODB_ALFUL);
	}

	hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod);

	return 0;

alloc_otr_db_buf_fail:
	kfree(db->ext_db->odb_buf_list);

ext_odb_buf_fail_out:
	if (sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
	}

alloc_sq_db_buf_fail:
	if (sdb_ext_mod)
		kfree(db->ext_db->sdb_buf_list);

ext_sdb_buf_fail_out:
	kfree(db->ext_db);
	return ret;
}
static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
						    struct ib_pd *pd)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_qp_init_attr init_attr;
	struct ib_qp *qp;

	memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
	init_attr.qp_type = IB_QPT_RC;
	init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr.cap.max_recv_wr = HNS_ROCE_MIN_WQE_NUM;
	init_attr.cap.max_send_wr = HNS_ROCE_MIN_WQE_NUM;

	qp = hns_roce_create_qp(pd, &init_attr, NULL);
	if (IS_ERR(qp)) {
		dev_err(dev, "Create loop qp for mr free failed!");
		return NULL;
	}

	return to_hr_qp(qp);
}
static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_free_mr *free_mr = &priv->free_mr;
	struct hns_roce_caps *caps = &hr_dev->caps;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_cq_init_attr cq_init_attr;
	struct ib_qp_attr attr = { 0 };
	struct hns_roce_qp *hr_qp;
	struct ib_cq *cq;
	struct ib_pd *pd;
	union ib_gid dgid;
	__be64 subnet_prefix;
	int attr_mask = 0;
	int ret;
	int i, j;
	u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
	u8 phy_port;
	u8 port = 0;
	u8 sl;

	/* Reserve a cq for the loop qps */
	cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2;
	cq_init_attr.comp_vector = 0;

	cq = rdma_zalloc_drv_obj(ibdev, ib_cq);
	if (!cq)
		return -ENOMEM;

	ret = hns_roce_create_cq(cq, &cq_init_attr, NULL);
	if (ret) {
		dev_err(dev, "Create cq for reserved loop qp failed!");
		goto alloc_cq_failed;
	}
	free_mr->mr_free_cq = to_hr_cq(cq);
	free_mr->mr_free_cq->ib_cq.device = &hr_dev->ib_dev;
	free_mr->mr_free_cq->ib_cq.uobject = NULL;
	free_mr->mr_free_cq->ib_cq.comp_handler = NULL;
	free_mr->mr_free_cq->ib_cq.event_handler = NULL;
	free_mr->mr_free_cq->ib_cq.cq_context = NULL;
	atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);

	pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
	if (!pd) {
		ret = -ENOMEM;
		goto alloc_mem_failed;
	}

	pd->device = ibdev;
	ret = hns_roce_alloc_pd(pd, NULL);
	if (ret)
		goto alloc_pd_failed;

	free_mr->mr_free_pd = to_hr_pd(pd);
	free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
	free_mr->mr_free_pd->ibpd.uobject = NULL;
	free_mr->mr_free_pd->ibpd.__internal_mr = NULL;
	atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);
	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
	attr.pkey_index = 0;
	attr.min_rnr_timer = 0;
	/* Disable read ability */
	attr.max_dest_rd_atomic = 0;
	attr.max_rd_atomic = 0;
	/* Use arbitrary values as rq_psn and sq_psn */
	attr.rq_psn = 0x0808;
	attr.sq_psn = 0x0808;
	attr.retry_cnt = 7;
	attr.rnr_retry = 7;
	attr.timeout = 0x12;
	attr.path_mtu = IB_MTU_256;
	attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
	rdma_ah_set_static_rate(&attr.ah_attr, 3);
848 for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
849 phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
850 (i % HNS_ROCE_MAX_PORTS);
851 sl = i / HNS_ROCE_MAX_PORTS;
853 for (j = 0; j < caps->num_ports; j++) {
854 if (hr_dev->iboe.phy_port[j] == phy_port) {
864 free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
865 if (!free_mr->mr_free_qp[i]) {
866 dev_err(dev, "Create loop qp failed!\n");
868 goto create_lp_qp_failed;
870 hr_qp = free_mr->mr_free_qp[i];
873 hr_qp->phy_port = phy_port;
874 hr_qp->ibqp.qp_type = IB_QPT_RC;
875 hr_qp->ibqp.device = &hr_dev->ib_dev;
876 hr_qp->ibqp.uobject = NULL;
877 atomic_set(&hr_qp->ibqp.usecnt, 0);
879 hr_qp->ibqp.recv_cq = cq;
880 hr_qp->ibqp.send_cq = cq;
		rdma_ah_set_port_num(&attr.ah_attr, port + 1);
		rdma_ah_set_sl(&attr.ah_attr, sl);
		attr.port_num = port + 1;

		attr.dest_qp_num = hr_qp->qpn;
		memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
		       hr_dev->dev_addr[port],
		       ETH_ALEN);

		memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
		memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
		memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
		dgid.raw[11] = 0xff;
		dgid.raw[12] = 0xfe;
		dgid.raw[8] ^= 2;
		rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);
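
		/*
		 * Editor's note (assumption based on the code above): this
		 * builds a link-local GID from the port MAC in modified
		 * EUI-64 form, so MAC 00:11:22:33:44:55 yields the DGID
		 * fe80::0211:22ff:fe33:4455.
		 */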
		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RESET, IB_QPS_INIT);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN,
					    IB_QPS_INIT, IB_QPS_RTR);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RTR, IB_QPS_RTS);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}
	}

	return 0;
create_lp_qp_failed:
	for (i -= 1; i >= 0; i--) {
		hr_qp = free_mr->mr_free_qp[i];
		if (hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL))
			dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
	}

	hns_roce_dealloc_pd(pd, NULL);

alloc_pd_failed:
	kfree(pd);

alloc_mem_failed:
	hns_roce_destroy_cq(cq, NULL);
alloc_cq_failed:
	kfree(cq);
	return ret;
}
static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_free_mr *free_mr = &priv->free_mr;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_qp *hr_qp;
	int ret;
	int i;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;

		ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL);
		if (ret)
			dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
				i, ret);
	}

	hns_roce_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
	kfree(&free_mr->mr_free_cq->ib_cq);
	hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
	kfree(&free_mr->mr_free_pd->ibpd);
}
static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_db_table *db = &priv->db_table;
	struct device *dev = &hr_dev->pdev->dev;
	u32 sdb_ext_mod;
	u32 odb_ext_mod;
	u32 sdb_evt_mod;
	u32 odb_evt_mod;
	int ret;

	memset(db, 0, sizeof(*db));

	/* Default DB mode */
	sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE;
	odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE;
	sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE;
	odb_evt_mod = HNS_ROCE_ODB_POLL_MODE;

	db->sdb_ext_mod = sdb_ext_mod;
	db->odb_ext_mod = odb_ext_mod;

	ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod);
	if (ret) {
		dev_err(dev, "Failed in extend DB configuration.\n");
		return ret;
	}

	hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod);

	return 0;
}
static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
{
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_dev *hr_dev;

	lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
				  work);
	hr_dev = to_hr_dev(lp_qp_work->ib_dev);

	hns_roce_v1_release_lp_qp(hr_dev);

	if (hns_roce_v1_rsv_lp_qp(hr_dev))
		dev_err(&hr_dev->pdev->dev, "create reserved qp failed\n");

	if (lp_qp_work->comp_flag)
		complete(lp_qp_work->comp);

	kfree(lp_qp_work);
}
static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
{
	long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_free_mr *free_mr = &priv->free_mr;
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct device *dev = &hr_dev->pdev->dev;
	struct completion comp;

	lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
			     GFP_KERNEL);
	if (!lp_qp_work)
		return -ENOMEM;

	INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);

	lp_qp_work->ib_dev = &(hr_dev->ib_dev);
	lp_qp_work->comp = &comp;
	lp_qp_work->comp_flag = 1;

	init_completion(lp_qp_work->comp);

	queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));

	while (end > 0) {
		if (try_wait_for_completion(&comp))
			return 0;
		msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
		end -= HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE;
	}

	lp_qp_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		return 0;

	dev_warn(dev, "recreate lp qp timed out after 20s, returning failure!\n");
	return -ETIMEDOUT;
}
static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_send_wr send_wr;
	const struct ib_send_wr *bad_wr;
	int ret;

	memset(&send_wr, 0, sizeof(send_wr));
	send_wr.next = NULL;
	send_wr.num_sge = 0;
	send_wr.send_flags = 0;
	send_wr.sg_list = NULL;
	send_wr.wr_id = (unsigned long long)&send_wr;
	send_wr.opcode = IB_WR_RDMA_WRITE;

	ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
	if (ret) {
		dev_err(dev, "Post write wqe for mr free failed(%d)!", ret);
		return ret;
	}

	return 0;
}
static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
{
	unsigned long end =
		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
	struct hns_roce_mr_free_work *mr_work =
		container_of(work, struct hns_roce_mr_free_work, work);
	struct hns_roce_dev *hr_dev = to_hr_dev(mr_work->ib_dev);
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_free_mr *free_mr = &priv->free_mr;
	struct hns_roce_cq *mr_free_cq = free_mr->mr_free_cq;
	struct hns_roce_mr *hr_mr = mr_work->mr;
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
	struct hns_roce_qp *hr_qp;
	int ne = 0;
	int ret;
	int i;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;
		ne++;

		ret = hns_roce_v1_send_lp_wqe(hr_qp);
		if (ret) {
			dev_err(dev,
				"Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
				hr_qp->qpn, ret);
			goto free_work;
		}
	}

	if (!ne) {
		dev_err(dev, "Reserved loop qp is absent!\n");
		goto free_work;
	}

	do {
		ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
		if (ret < 0 && hr_qp) {
			dev_err(dev,
				"(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
				hr_qp->qpn, ret, hr_mr->key, ne);
			goto free_work;
		}
		ne -= ret;
		usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
			     (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
	} while (ne && time_before_eq(jiffies, end));

	if (ne != 0)
		dev_warn(dev,
			 "Poll cqe for mr 0x%x free timeout! Remain %d cqe\n",
			 hr_mr->key, ne);

free_work:
	if (mr_work->comp_flag)
		complete(mr_work->comp);
	kfree(mr_work);
}
static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr, struct ib_udata *udata)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_free_mr *free_mr = &priv->free_mr;
	long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_mr_free_work *mr_work;
	unsigned long start = jiffies;
	struct completion comp;
	int ret = 0;

	if (mr->enabled)
		if (hns_roce_hw_destroy_mpt(hr_dev, NULL,
					    key_to_hw_index(mr->key) &
					    (hr_dev->caps.num_mtpts - 1)))
			dev_warn(dev, "DESTROY_MPT failed!\n");

	mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
	if (!mr_work) {
		ret = -ENOMEM;
		goto free_mr;
	}

	INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);

	mr_work->ib_dev = &(hr_dev->ib_dev);
	mr_work->comp = &comp;
	mr_work->comp_flag = 1;
	mr_work->mr = (void *)mr;
	init_completion(mr_work->comp);

	queue_work(free_mr->free_mr_wq, &(mr_work->work));

	while (end > 0) {
		if (try_wait_for_completion(&comp))
			goto free_mr;
		msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
		end -= HNS_ROCE_V1_FREE_MR_WAIT_VALUE;
	}

	mr_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		goto free_mr;

	dev_warn(dev, "Free mr work 0x%x timed out after 50s and failed!\n",
		 mr->key);
	ret = -ETIMEDOUT;

free_mr:
	dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
		mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));

	ida_free(&hr_dev->mr_table.mtpt_ida.ida, (int)key_to_hw_index(mr->key));
	hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
	kfree(mr);

	return ret;
}
static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_db_table *db = &priv->db_table;
	struct device *dev = &hr_dev->pdev->dev;

	if (db->sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
		kfree(db->ext_db->sdb_buf_list);
	}

	if (db->odb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE,
				  db->ext_db->odb_buf_list->buf,
				  db->ext_db->odb_buf_list->map);
		kfree(db->ext_db->odb_buf_list);
	}

	kfree(db->ext_db);
}
static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_raq_table *raq = &priv->raq_table;
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t addr;
	int raq_shift;
	__le32 tmp;
	u32 val;
	int ret;

	raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
	if (!raq->e_raq_buf)
		return -ENOMEM;

	raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE,
						 &addr, GFP_KERNEL);
	if (!raq->e_raq_buf->buf) {
		ret = -ENOMEM;
		goto err_dma_alloc_raq;
	}
	raq->e_raq_buf->map = addr;

	/* Configure raq extended address. 48bit 4K align */
	roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);

	/* Configure raq_shift */
	raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
	val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);
	/*
	 * 44 = 32 + 12. Addresses handed to the hardware are shifted right
	 * by 12 (4 KB pages); shifting by a further 32 extracts the high
	 * 32 bits of that page-shifted value.
	 */
	roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
		       raq->e_raq_buf->map >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
	dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);

	/* Configure raq threshold */
	val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
		       ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
		       HNS_ROCE_V1_EXT_RAQ_WF);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
	dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);

	/* Enable extend raq */
	val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
		       POL_TIME_INTERVAL_VAL);
	roce_set_bit(tmp, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
	roce_set_field(tmp,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
		       2);
	roce_set_bit(tmp,
		     ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
	dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);

	/* Enable raq drop */
	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);

	return 0;

err_dma_alloc_raq:
	kfree(raq->e_raq_buf);
	return ret;
}
static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_raq_table *raq = &priv->raq_table;
	struct device *dev = &hr_dev->pdev->dev;

	dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
			  raq->e_raq_buf->map);
	kfree(raq->e_raq_buf);
}
static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
{
	__le32 tmp;
	u32 val;

	if (enable_flag) {
		val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
		/* Open all ports */
		tmp = cpu_to_le32(val);
		roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
			       ROCEE_GLB_CFG_ROCEE_PORT_ST_S,
			       ALL_PORT_VAL_OPEN);
		val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	} else {
		val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
		/* Close all ports */
		tmp = cpu_to_le32(val);
		roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
			       ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0);
		val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	}
}
static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct device *dev = &hr_dev->pdev->dev;
	int ret;

	priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.qpc_buf.buf)
		return -ENOMEM;

	priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.mtpt_buf.buf) {
		ret = -ENOMEM;
		goto err_failed_alloc_mtpt_buf;
	}

	priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.cqc_buf.buf) {
		ret = -ENOMEM;
		goto err_failed_alloc_cqc_buf;
	}

	return 0;

err_failed_alloc_cqc_buf:
	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);

err_failed_alloc_mtpt_buf:
	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);

	return ret;
}
static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct device *dev = &hr_dev->pdev->dev;

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
}
static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
	struct device *dev = &hr_dev->pdev->dev;

	/*
	 * This buffer will be used for CQ's tptr (tail pointer), also
	 * named ci (consumer index). Every CQ will use 2 bytes to save
	 * cqe ci in hip06. Hardware will read this area to get new ci
	 * when the queue is almost full.
	 */
	tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
					   &tptr_buf->map, GFP_KERNEL);
	if (!tptr_buf->buf)
		return -ENOMEM;

	hr_dev->tptr_dma_addr = tptr_buf->map;
	hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;

	return 0;
}
static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
	struct device *dev = &hr_dev->pdev->dev;

	dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
			  tptr_buf->buf, tptr_buf->map);
}
static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_free_mr *free_mr = &priv->free_mr;
	struct device *dev = &hr_dev->pdev->dev;
	int ret;

	free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
	if (!free_mr->free_mr_wq) {
		dev_err(dev, "Create free mr workqueue failed!\n");
		return -ENOMEM;
	}

	ret = hns_roce_v1_rsv_lp_qp(hr_dev);
	if (ret) {
		dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
		destroy_workqueue(free_mr->free_mr_wq);
	}

	return ret;
}
static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_free_mr *free_mr = &priv->free_mr;

	destroy_workqueue(free_mr->free_mr_wq);

	hns_roce_v1_release_lp_qp(hr_dev);
}
/**
 * hns_roce_v1_reset - reset RoCE
 * @hr_dev: RoCE device struct pointer
 * @dereset: true -- de-assert reset, false -- assert reset
 *
 * Return: 0 on success, negative errno on failure.
 */
static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
{
	struct device_node *dsaf_node;
	struct device *dev = &hr_dev->pdev->dev;
	struct device_node *np = dev->of_node;
	struct fwnode_handle *fwnode;
	int ret;

	/* check if this is DT/ACPI case */
	if (dev_of_node(dev)) {
		dsaf_node = of_parse_phandle(np, "dsaf-handle", 0);
		if (!dsaf_node) {
			dev_err(dev, "could not find dsaf-handle\n");
			return -EINVAL;
		}
		fwnode = &dsaf_node->fwnode;
	} else if (is_acpi_device_node(dev->fwnode)) {
		struct fwnode_reference_args args;

		ret = acpi_node_get_property_reference(dev->fwnode,
						       "dsaf-handle", 0, &args);
		if (ret) {
			dev_err(dev, "could not find dsaf-handle\n");
			return ret;
		}
		fwnode = args.fwnode;
	} else {
		dev_err(dev, "cannot read data from DT or ACPI\n");
		return -ENXIO;
	}

	ret = hns_dsaf_roce_reset(fwnode, false);
	if (ret)
		return ret;

	if (dereset) {
		msleep(SLEEP_TIME_INTERVAL);
		ret = hns_dsaf_roce_reset(fwnode, true);
	}

	return ret;
}
static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;
	int i;

	hr_dev->vendor_id = roce_read(hr_dev, ROCEE_VENDOR_ID_REG);
	hr_dev->vendor_part_id = roce_read(hr_dev, ROCEE_VENDOR_PART_ID_REG);
	hr_dev->sys_image_guid = roce_read(hr_dev, ROCEE_SYS_IMAGE_GUID_L_REG) |
				 ((u64)roce_read(hr_dev,
					    ROCEE_SYS_IMAGE_GUID_H_REG) << 32);
	hr_dev->hw_rev = HNS_ROCE_HW_VER1;

	caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
	caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
	caps->min_wqes = HNS_ROCE_MIN_WQE_NUM;
	caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM;
	caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
	caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM;
	caps->max_sq_sg = HNS_ROCE_V1_SG_NUM;
	caps->max_rq_sg = HNS_ROCE_V1_SG_NUM;
	caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE;
	caps->num_uars = HNS_ROCE_V1_UAR_NUM;
	caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM;
	caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM;
	caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM;
	caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM;
	caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM;
	caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS;
	caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM;
	caps->max_qp_init_rdma = HNS_ROCE_V1_MAX_QP_INIT_RDMA;
	caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA;
	caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ;
	caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ;
	caps->qpc_sz = HNS_ROCE_V1_QPC_SIZE;
	caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE;
	caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE;
	caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE;
	caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
	caps->cqe_sz = HNS_ROCE_V1_CQE_SIZE;
	caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
	caps->reserved_lkey = 0;
	caps->reserved_pds = 0;
	caps->reserved_mrws = 1;
	caps->reserved_uars = 0;
	caps->reserved_cqs = 0;
	caps->reserved_qps = 12; /* 2 SQPs per port, six ports, 12 in total */
	caps->chunk_sz = HNS_ROCE_V1_TABLE_CHUNK_SIZE;

	for (i = 0; i < caps->num_ports; i++)
		caps->pkey_table_len[i] = 1;

	for (i = 0; i < caps->num_ports; i++) {
		/* Six ports share 16 GID entries in the v1 engine */
		if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports))
			caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
						 caps->num_ports;
		else
			caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
						 caps->num_ports + 1;
	}
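
	/*
	 * Worked example (editor's illustration): with HNS_ROCE_V1_GID_NUM ==
	 * 16 and six ports, 16 % 6 == 4, so ports 0-3 get 16 / 6 + 1 == 3
	 * entries each and ports 4-5 get 16 / 6 == 2, which sums to 16.
	 */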
	caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
	caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
	caps->local_ca_ack_delay = roce_read(hr_dev, ROCEE_ACK_DELAY_REG);
	caps->max_mtu = IB_MTU_2048;

	return 0;
}
static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	__le32 tmp;
	u32 val;
	int ret;

	/* DMAE user config */
	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
		       1 << PAGES_SHIFT_16);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);

	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
		       1 << PAGES_SHIFT_16);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DMAE_USER_CFG2_REG, val);
	ret = hns_roce_db_init(hr_dev);
	if (ret) {
		dev_err(dev, "doorbell init failed!\n");
		return ret;
	}

	ret = hns_roce_raq_init(hr_dev);
	if (ret) {
		dev_err(dev, "raq init failed!\n");
		goto error_failed_raq_init;
	}

	ret = hns_roce_bt_init(hr_dev);
	if (ret) {
		dev_err(dev, "bt init failed!\n");
		goto error_failed_bt_init;
	}

	ret = hns_roce_tptr_init(hr_dev);
	if (ret) {
		dev_err(dev, "tptr init failed!\n");
		goto error_failed_tptr_init;
	}

	ret = hns_roce_free_mr_init(hr_dev);
	if (ret) {
		dev_err(dev, "free mr init failed!\n");
		goto error_failed_free_mr_init;
	}

	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);

	return 0;

error_failed_free_mr_init:
	hns_roce_tptr_free(hr_dev);

error_failed_tptr_init:
	hns_roce_bt_free(hr_dev);

error_failed_bt_init:
	hns_roce_raq_free(hr_dev);

error_failed_raq_init:
	hns_roce_db_free(hr_dev);

	return ret;
}
static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
	hns_roce_free_mr_free(hr_dev);
	hns_roce_tptr_free(hr_dev);
	hns_roce_bt_free(hr_dev);
	hns_roce_raq_free(hr_dev);
	hns_roce_db_free(hr_dev);
}
static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev)
{
	u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG);

	return (!!(status & (1 << HCR_GO_BIT)));
}
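
/*
 * Editor's note (inferred from the callers below): bit HCR_GO_BIT in
 * ROCEE_MB6_REG is the mailbox "GO" flag; it stays set while the firmware
 * owns the mailbox, so a non-zero return here means "command still pending".
 */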
static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
				 u64 out_param, u32 in_modifier, u8 op_modifier,
				 u16 op, u16 token, int event)
{
	u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG);
	unsigned long end;
	u32 val = 0;
	__le32 tmp;

	end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
	while (hns_roce_v1_cmd_pending(hr_dev)) {
		if (time_after(jiffies, end)) {
			dev_err(hr_dev->dev, "jiffies=%d end=%d\n",
				(int)jiffies, (int)end);
			return -EAGAIN;
		}
		cond_resched();
	}

	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
		       op);
	roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
		       ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
	roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
	roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
	roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_TOKEN_M,
		       ROCEE_MB6_ROCEE_MB_TOKEN_S, token);

	val = le32_to_cpu(tmp);
	writeq(in_param, hcr + 0);
	writeq(out_param, hcr + 2);
	writel(in_modifier, hcr + 4);
	/* Memory barrier */
	wmb();

	writel(val, hcr + 5);

	return 0;
}
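
/*
 * Editor's note (layout inferred from the writes above): the mailbox is a
 * block of registers at ROCEE_MB1_REG; words 0-1 carry in_param, words 2-3
 * out_param, word 4 in_modifier, and word 5 packs op, op_modifier, event,
 * token and the GO bit, so writing word 5 last is what launches the command.
 */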
static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
				unsigned int timeout)
{
	u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
	unsigned long end;
	u32 status = 0;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end))
		cond_resched();

	if (hns_roce_v1_cmd_pending(hr_dev)) {
		dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
		return -ETIMEDOUT;
	}

	status = le32_to_cpu((__force __le32)
			      __raw_readl(hcr + HCR_STATUS_OFFSET));
	if ((status & STATUS_MASK) != 0x1) {
		dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status);
		return -EBUSY;
	}

	return 0;
}
static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u32 port,
			       int gid_index, const union ib_gid *gid,
			       const struct ib_gid_attr *attr)
{
	unsigned long flags;
	u32 *p = NULL;
	u8 gid_idx;

	gid_idx = hns_get_gid_index(hr_dev, port, gid_index);

	spin_lock_irqsave(&hr_dev->iboe.lock, flags);

	p = (u32 *)&gid->raw[0];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[4];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[8];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[0xc];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

	return 0;
}
static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
			       u8 *addr)
{
	u32 reg_smac_l;
	u16 reg_smac_h;
	__le32 tmp;
	u16 *p_h;
	u32 *p;
	u32 val;

	/*
	 * When the MAC changes, loopback may fail because the SMAC no
	 * longer equals the DMAC, so the reserved QPs must be released
	 * and recreated.
	 */
	if (hr_dev->hw->dereg_mr) {
		int ret;

		ret = hns_roce_v1_recreate_lp_qp(hr_dev);
		if (ret && ret != -ETIMEDOUT)
			return ret;
	}

	p = (u32 *)(&addr[0]);
	reg_smac_l = *p;
	roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
		       PHY_PORT_OFFSET * phy_port);

	val = roce_read(hr_dev,
			ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
	tmp = cpu_to_le32(val);
	p_h = (u16 *)(&addr[4]);
	reg_smac_h = *p_h;
	roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
		       ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
		   val);

	return 0;
}
static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
				enum ib_mtu mtu)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev,
			ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
		       ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
		   val);
}
static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf,
				  struct hns_roce_mr *mr,
				  unsigned long mtpt_idx)
{
	u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 };
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_v1_mpt_entry *mpt_entry;
	dma_addr_t pbl_ba;
	int count;
	int i;

	/* MPT filled into mailbox buf */
	mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M,
		       MPT_BYTE_4_KEY_STATE_S, KEY_VALID);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M,
		       MPT_BYTE_4_KEY_S, mr->key);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M,
		       MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S,
		     (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M,
		       MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S,
		     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S,
		     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S,
		     (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S,
		     0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0);
	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
		       MPT_BYTE_12_PBL_ADDR_H_S, 0);
	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
		       MPT_BYTE_12_MW_BIND_COUNTER_S, 0);

	mpt_entry->virt_addr_l = cpu_to_le32((u32)mr->iova);
	mpt_entry->virt_addr_h = cpu_to_le32((u32)(mr->iova >> 32));
	mpt_entry->length = cpu_to_le32((u32)mr->size);

	roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
		       MPT_BYTE_28_PD_S, mr->pd);
	roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M,
		       MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx);
	roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
		       MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);

	/* DMA memory register */
	if (mr->type == MR_TYPE_DMA)
		return 0;

	count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
				  ARRAY_SIZE(pages), &pbl_ba);
	if (count < 1) {
		ibdev_err(ibdev, "failed to find PBL mtr, count = %d.", count);
		return -ENOBUFS;
	}
	/* Register user mr */
	for (i = 0; i < count; i++) {
		switch (i) {
		case 0:
			mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_36,
				       MPT_BYTE_36_PA0_H_M,
				       MPT_BYTE_36_PA0_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_32));
			break;
		case 1:
			roce_set_field(mpt_entry->mpt_byte_36,
				       MPT_BYTE_36_PA1_L_M,
				       MPT_BYTE_36_PA1_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_40,
				       MPT_BYTE_40_PA1_H_M,
				       MPT_BYTE_40_PA1_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_24));
			break;
		case 2:
			roce_set_field(mpt_entry->mpt_byte_40,
				       MPT_BYTE_40_PA2_L_M,
				       MPT_BYTE_40_PA2_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_44,
				       MPT_BYTE_44_PA2_H_M,
				       MPT_BYTE_44_PA2_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_16));
			break;
		case 3:
			roce_set_field(mpt_entry->mpt_byte_44,
				       MPT_BYTE_44_PA3_L_M,
				       MPT_BYTE_44_PA3_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_48,
				       MPT_BYTE_48_PA3_H_M,
				       MPT_BYTE_48_PA3_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_8));
			break;
		case 4:
			mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_56,
				       MPT_BYTE_56_PA4_H_M,
				       MPT_BYTE_56_PA4_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_32));
			break;
		case 5:
			roce_set_field(mpt_entry->mpt_byte_56,
				       MPT_BYTE_56_PA5_L_M,
				       MPT_BYTE_56_PA5_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_60,
				       MPT_BYTE_60_PA5_H_M,
				       MPT_BYTE_60_PA5_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_24));
			break;
		case 6:
			roce_set_field(mpt_entry->mpt_byte_60,
				       MPT_BYTE_60_PA6_L_M,
				       MPT_BYTE_60_PA6_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_64,
				       MPT_BYTE_64_PA6_H_M,
				       MPT_BYTE_64_PA6_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_16));
			break;
		default:
			break;
		}
	}
	mpt_entry->pbl_addr_l = cpu_to_le32(pbl_ba);
	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
		       MPT_BYTE_12_PBL_ADDR_H_S, upper_32_bits(pbl_ba));

	return 0;
}
static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
{
	return hns_roce_buf_offset(hr_cq->mtr.kmem, n * HNS_ROCE_V1_CQE_SIZE);
}

static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
{
	struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);

	/* A CQE is valid when its owner bit differs from the MSB of cons_idx */
	return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
		!!(n & hr_cq->cq_depth)) ? hr_cqe : NULL;
}
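
/*
 * Editor's note (inferred from the check above): hardware toggles the CQE
 * owner bit on every pass around the ring, and the MSB of cons_index tracks
 * the software pass, so a CQE is valid exactly when the two disagree.
 */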
static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
{
	return get_sw_cqe(hr_cq, hr_cq->cons_index);
}
static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
	__le32 doorbell[2];

	doorbell[0] = cpu_to_le32(cons_index & ((hr_cq->cq_depth << 1) - 1));
	doorbell[1] = 0;
	roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);

	hns_roce_write64_k(doorbell, hr_cq->db_reg);
}
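
/*
 * Editor's note (assumption based on the mask): the CI doorbell reports
 * cons_index modulo twice the CQ depth; the extra wrap bit lets hardware
 * distinguish a completely full ring from an empty one.
 */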
static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				   struct hns_roce_srq *srq)
{
	struct hns_roce_cqe *cqe, *dest;
	u32 prod_index;
	u8 owner_bit;
	int nfreed = 0;

	for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index);
	     ++prod_index) {
		if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
			break;
	}

	/*
	 * Now backwards through the CQ, removing CQ entries
	 * that match our QP by overwriting them with next entries.
	 */
	while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
		cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
		if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
				    CQE_BYTE_16_LOCAL_QPN_S) &
				    HNS_ROCE_CQE_QPN_MASK) == qpn) {
			/* The v1 engine does not support SRQ */
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(hr_cq, (prod_index + nfreed) &
				       hr_cq->ib_cq.cqe);
			owner_bit = roce_get_bit(dest->cqe_byte_4,
						 CQE_BYTE_4_OWNER_S);
			memcpy(dest, cqe, sizeof(*cqe));
			roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S,
				     owner_bit);
		}
	}

	if (nfreed) {
		hr_cq->cons_index += nfreed;
		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
	}
}
static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				 struct hns_roce_srq *srq)
{
	spin_lock_irq(&hr_cq->lock);
	__hns_roce_v1_cq_clean(hr_cq, qpn, srq);
	spin_unlock_irq(&hr_cq->lock);
}
2067 static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
2068 struct hns_roce_cq *hr_cq, void *mb_buf,
2069 u64 *mtts, dma_addr_t dma_handle)
2071 struct hns_roce_v1_priv *priv = hr_dev->priv;
2072 struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
2073 struct hns_roce_cq_context *cq_context = mb_buf;
2074 dma_addr_t tptr_dma_addr;
2077 memset(cq_context, 0, sizeof(*cq_context));
2079 /* Get the tptr for this CQ. */
2080 offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
2081 tptr_dma_addr = tptr_buf->map + offset;
2082 hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);
2084 /* Register cq_context members */
2085 roce_set_field(cq_context->cqc_byte_4,
2086 CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M,
2087 CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
2088 roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
2089 CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);
2091 cq_context->cq_bt_l = cpu_to_le32((u32)dma_handle);
2093 roce_set_field(cq_context->cqc_byte_12,
2094 CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
2095 CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S,
2096 ((u64)dma_handle >> 32));
2097 roce_set_field(cq_context->cqc_byte_12,
2098 CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
2099 CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
2100 ilog2(hr_cq->cq_depth));
2101 roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
2102 CQ_CONTEXT_CQC_BYTE_12_CEQN_S, hr_cq->vector);
2104 cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));
2106 roce_set_field(cq_context->cqc_byte_20,
2107 CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
2108 CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S, (mtts[0]) >> 32);
2109 /* Dedicated hardware, directly set 0 */
2110 roce_set_field(cq_context->cqc_byte_20,
2111 CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
2112 CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0);
2114 * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
2115 * using 4K page, and shift more 32 because of
2116 * calculating the high 32 bit value evaluated to hardware.
2118 roce_set_field(cq_context->cqc_byte_20,
2119 CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
2120 CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
2121 tptr_dma_addr >> 44);
2123 cq_context->cqe_tptr_addr_l = cpu_to_le32((u32)(tptr_dma_addr >> 12));
2125 roce_set_field(cq_context->cqc_byte_32,
2126 CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
2127 CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0);
2128 roce_set_bit(cq_context->cqc_byte_32,
2129 CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0);
2130 roce_set_bit(cq_context->cqc_byte_32,
2131 CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0);
2132 roce_set_bit(cq_context->cqc_byte_32,
2133 CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0);
2134 roce_set_bit(cq_context->cqc_byte_32,
2135 CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
2137 /* The initial value of cq's ci is 0 */
2138 roce_set_field(cq_context->cqc_byte_32,
2139 CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
2140 CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
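/*
 * Worked example of the tptr address split above (address assumed for
 * illustration): for tptr_dma_addr = 0x123456789000,
 *	tptr_dma_addr >> 12 = 0x123456789 -> cqe_tptr_addr_l = 0x23456789
 *	tptr_dma_addr >> 44 = 0x1         -> CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H
 * Together the two fields reconstruct the 4K-aligned DMA address.
 */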
2143 static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
2144 enum ib_cq_notify_flags flags)
2146 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2147 u32 notification_flag;
2148 __le32 doorbell[2] = {};
2150 notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
2151 IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
2153 * flags = 0; notification flag = 1, next completion
2154 * flags = 1; notification flag = 0, solicited completion only
2157 cpu_to_le32(hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
2158 roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
2159 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
2160 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
2161 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
2162 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1);
2163 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
2164 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
2165 hr_cq->cqn | notification_flag);
2167 hns_roce_write64_k(doorbell, hr_cq->db_reg);
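/*
 * Note on the consumer-index mask above: the index is folded into the
 * range [0, 2 * cq_depth) rather than [0, cq_depth). Keeping one extra
 * wrap bit is a common ring-buffer technique that lets hardware tell a
 * completely full queue apart from an empty one.
 */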
2172 static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
2173 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
2180 struct hns_roce_cqe *cqe;
2181 struct hns_roce_qp *hr_qp;
2182 struct hns_roce_wq *wq;
2183 struct hns_roce_wqe_ctrl_seg *sq_wqe;
2184 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
2185 struct device *dev = &hr_dev->pdev->dev;
2187 /* Find the CQE according to the consumer index */
2188 cqe = next_cqe_sw(hr_cq);
2192 ++hr_cq->cons_index;
2193 /* Memory barrier */
2194 rmb();
2196 is_send = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));
2198 /* Local_qpn in a UD CQE is always 1, so the real QPN must be computed */
2199 if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2200 CQE_BYTE_16_LOCAL_QPN_S) <= 1) {
2201 qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
2202 CQE_BYTE_20_PORT_NUM_S) +
2203 roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2204 CQE_BYTE_16_LOCAL_QPN_S) *
2207 qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2208 CQE_BYTE_16_LOCAL_QPN_S);
2211 if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) {
2212 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
2213 if (unlikely(!hr_qp)) {
2214 dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n",
2215 hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK));
2222 wc->qp = &(*cur_qp)->ibqp;
2225 status = roce_get_field(cqe->cqe_byte_4,
2226 CQE_BYTE_4_STATUS_OF_THE_OPERATION_M,
2227 CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) &
2228 HNS_ROCE_CQE_STATUS_MASK;
2230 case HNS_ROCE_CQE_SUCCESS:
2231 wc->status = IB_WC_SUCCESS;
2233 case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR:
2234 wc->status = IB_WC_LOC_LEN_ERR;
2236 case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR:
2237 wc->status = IB_WC_LOC_QP_OP_ERR;
2239 case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR:
2240 wc->status = IB_WC_LOC_PROT_ERR;
2242 case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR:
2243 wc->status = IB_WC_WR_FLUSH_ERR;
2245 case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR:
2246 wc->status = IB_WC_MW_BIND_ERR;
2248 case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR:
2249 wc->status = IB_WC_BAD_RESP_ERR;
2251 case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR:
2252 wc->status = IB_WC_LOC_ACCESS_ERR;
2254 case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
2255 wc->status = IB_WC_REM_INV_REQ_ERR;
2257 case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR:
2258 wc->status = IB_WC_REM_ACCESS_ERR;
2260 case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR:
2261 wc->status = IB_WC_REM_OP_ERR;
2263 case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
2264 wc->status = IB_WC_RETRY_EXC_ERR;
2266 case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
2267 wc->status = IB_WC_RNR_RETRY_EXC_ERR;
2270 wc->status = IB_WC_GENERAL_ERR;
2274 /* The CQE reports an error status, return directly */
2275 if (wc->status != IB_WC_SUCCESS)
2279 /* SQ corresponds to this CQE */
2280 sq_wqe = hns_roce_get_send_wqe(*cur_qp,
2281 roce_get_field(cqe->cqe_byte_4,
2282 CQE_BYTE_4_WQE_INDEX_M,
2283 CQE_BYTE_4_WQE_INDEX_S) &
2284 ((*cur_qp)->sq.wqe_cnt-1));
2285 switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) {
2286 case HNS_ROCE_WQE_OPCODE_SEND:
2287 wc->opcode = IB_WC_SEND;
2289 case HNS_ROCE_WQE_OPCODE_RDMA_READ:
2290 wc->opcode = IB_WC_RDMA_READ;
2291 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2293 case HNS_ROCE_WQE_OPCODE_RDMA_WRITE:
2294 wc->opcode = IB_WC_RDMA_WRITE;
2296 case HNS_ROCE_WQE_OPCODE_LOCAL_INV:
2297 wc->opcode = IB_WC_LOCAL_INV;
2299 case HNS_ROCE_WQE_OPCODE_UD_SEND:
2300 wc->opcode = IB_WC_SEND;
2303 wc->status = IB_WC_GENERAL_ERR;
2306 wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ?
2307 IB_WC_WITH_IMM : 0);
2309 wq = &(*cur_qp)->sq;
2310 if ((*cur_qp)->sq_signal_bits) {
2312 * If sq_signal_bits is 1, the tail pointer is first
2313 * updated to the WQE that the current CQE
2314 * corresponds to
2316 wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
2317 CQE_BYTE_4_WQE_INDEX_M,
2318 CQE_BYTE_4_WQE_INDEX_S);
2319 wq->tail += (wqe_ctr - (u16)wq->tail) &
2322 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2325 /* RQ corresponds to this CQE */
2326 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2327 opcode = roce_get_field(cqe->cqe_byte_4,
2328 CQE_BYTE_4_OPERATION_TYPE_M,
2329 CQE_BYTE_4_OPERATION_TYPE_S) &
2330 HNS_ROCE_CQE_OPCODE_MASK;
2332 case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
2333 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2334 wc->wc_flags = IB_WC_WITH_IMM;
2336 cpu_to_be32(le32_to_cpu(cqe->immediate_data));
2338 case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
2339 if (roce_get_bit(cqe->cqe_byte_4,
2340 CQE_BYTE_4_IMM_INDICATOR_S)) {
2341 wc->opcode = IB_WC_RECV;
2342 wc->wc_flags = IB_WC_WITH_IMM;
2343 wc->ex.imm_data = cpu_to_be32(
2344 le32_to_cpu(cqe->immediate_data));
2346 wc->opcode = IB_WC_RECV;
2351 wc->status = IB_WC_GENERAL_ERR;
2355 /* Update tail pointer, record wr_id */
2356 wq = &(*cur_qp)->rq;
2357 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2359 wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
2361 wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
2362 CQE_BYTE_20_REMOTE_QPN_M,
2363 CQE_BYTE_20_REMOTE_QPN_S);
2364 wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
2365 CQE_BYTE_20_GRH_PRESENT_S) ?
2367 wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28,
2368 CQE_BYTE_28_P_KEY_IDX_M,
2369 CQE_BYTE_28_P_KEY_IDX_S);
2375 int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2377 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2378 struct hns_roce_qp *cur_qp = NULL;
2379 unsigned long flags;
2383 spin_lock_irqsave(&hr_cq->lock, flags);
2385 for (npolled = 0; npolled < num_entries; ++npolled) {
2386 ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled);
2392 *hr_cq->tptr_addr = hr_cq->cons_index &
2393 ((hr_cq->cq_depth << 1) - 1);
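/*
 * The tptr entry updated above is the software-owned shadow of the CQ
 * consumer index that hns_roce_v1_write_cqc() exposed to hardware via
 * tptr_dma_addr, presumably so the engine can track freed CQE slots
 * without an extra register read.
 */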
2395 hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
2398 spin_unlock_irqrestore(&hr_cq->lock, flags);
2400 if (ret == 0 || ret == -EAGAIN)
2406 static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
2407 struct hns_roce_hem_table *table, int obj,
2410 struct hns_roce_v1_priv *priv = hr_dev->priv;
2411 struct device *dev = &hr_dev->pdev->dev;
2412 long end = HW_SYNC_TIMEOUT_MSECS;
2413 __le32 bt_cmd_val[2] = {0};
2414 unsigned long flags = 0;
2415 void __iomem *bt_cmd;
2418 switch (table->type) {
2420 bt_ba = priv->bt_table.qpc_buf.map >> 12;
2423 bt_ba = priv->bt_table.mtpt_buf.map >> 12;
2426 bt_ba = priv->bt_table.cqc_buf.map >> 12;
2429 dev_dbg(dev, "HEM_TYPE_SRQC not supported.\n");
2434 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
2435 ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
2436 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
2437 ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
2438 roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
2439 roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
2441 spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
2443 bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
2446 if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
2448 dev_err(dev, "Write bt_cmd error, hw_sync is not zero.\n");
2449 spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
2456 mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
2457 end -= HW_SYNC_SLEEP_TIME_INTERVAL;
2460 bt_cmd_val[0] = cpu_to_le32(bt_ba);
2461 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
2462 ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
2463 hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
2465 spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
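/*
 * Shape of the BT (base address table) command sequence used above:
 * poll ROCEE_BT_CMD_H_REG until the hardware sync bit clears (bounded
 * by HW_SYNC_TIMEOUT_MSECS), then post the two-word command
 * { bt_ba low 32 bits, flags/BA high bits } through ROCEE_BT_CMD_L_REG,
 * all under bt_cmd_lock so commands cannot interleave.
 */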
2470 static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
2471 enum hns_roce_qp_state cur_state,
2472 enum hns_roce_qp_state new_state,
2473 struct hns_roce_qp_context *context,
2474 struct hns_roce_qp *hr_qp)
2477 op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = {
2478 [HNS_ROCE_QP_STATE_RST] = {
2479 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2480 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2481 [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2483 [HNS_ROCE_QP_STATE_INIT] = {
2484 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2485 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2486 /* Note: In v1 engine, HW doesn't support INIT2INIT.
2487 * We use the RST2INIT cmd instead of INIT2INIT.
2488 */
2489 [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2490 [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP,
2492 [HNS_ROCE_QP_STATE_RTR] = {
2493 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2494 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2495 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP,
2497 [HNS_ROCE_QP_STATE_RTS] = {
2498 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2499 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2500 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP,
2501 [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP,
2503 [HNS_ROCE_QP_STATE_SQD] = {
2504 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2505 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2506 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP,
2507 [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP,
2509 [HNS_ROCE_QP_STATE_ERR] = {
2510 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2511 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2515 struct hns_roce_cmd_mailbox *mailbox;
2516 struct device *dev = &hr_dev->pdev->dev;
2519 if (cur_state >= HNS_ROCE_QP_NUM_STATE ||
2520 new_state >= HNS_ROCE_QP_NUM_STATE ||
2521 !op[cur_state][new_state]) {
2522 dev_err(dev, "[modify_qp] unsupported transition from state %d to %d\n",
2523 cur_state, new_state);
2527 if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
2528 return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2529 HNS_ROCE_CMD_2RST_QP,
2530 HNS_ROCE_CMD_TIMEOUT_MSECS);
2532 if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
2533 return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2534 HNS_ROCE_CMD_2ERR_QP,
2535 HNS_ROCE_CMD_TIMEOUT_MSECS);
2537 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2538 if (IS_ERR(mailbox))
2539 return PTR_ERR(mailbox);
2541 memcpy(mailbox->buf, context, sizeof(*context));
2543 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
2544 op[cur_state][new_state],
2545 HNS_ROCE_CMD_TIMEOUT_MSECS);
2547 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
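/*
 * Example lookups in the op[][] table above: RESET -> INIT selects
 * HNS_ROCE_CMD_RST2INIT_QP, while a combination without an entry
 * (e.g. RTR -> RTR) leaves op[cur_state][new_state] == 0 and is
 * rejected before any mailbox command is posted.
 */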
2551 static int find_wqe_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
2552 u64 *sq_ba, u64 *rq_ba, dma_addr_t *bt_ba)
2554 struct ib_device *ibdev = &hr_dev->ib_dev;
2557 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, sq_ba, 1, bt_ba);
2559 ibdev_err(ibdev, "Failed to find SQ ba\n");
2563 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, rq_ba,
2566 ibdev_err(ibdev, "Failed to find RQ ba\n");
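/*
 * find_wqe_mtt() resolves three addresses in one go: the first SQ WQE
 * buffer address (offset 0 in the MTR), the first RQ WQE buffer address
 * (at rq.offset in the same MTR), and the base of the buffer's
 * translation table, which is later programmed into the QPC as
 * sq_rq_bt_l plus the SQ_RQ_BT_H field.
 */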
2573 static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
2574 int attr_mask, enum ib_qp_state cur_state,
2575 enum ib_qp_state new_state)
2577 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2578 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2579 struct hns_roce_sqp_context *context;
2580 dma_addr_t dma_handle = 0;
2587 context = kzalloc(sizeof(*context), GFP_KERNEL);
2591 /* Search QP buf's MTTs */
2592 if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle))
2595 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2596 roce_set_field(context->qp1c_bytes_4,
2597 QP1C_BYTES_4_SQ_WQE_SHIFT_M,
2598 QP1C_BYTES_4_SQ_WQE_SHIFT_S,
2599 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2600 roce_set_field(context->qp1c_bytes_4,
2601 QP1C_BYTES_4_RQ_WQE_SHIFT_M,
2602 QP1C_BYTES_4_RQ_WQE_SHIFT_S,
2603 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2604 roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
2605 QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);
2607 context->sq_rq_bt_l = cpu_to_le32(dma_handle);
2608 roce_set_field(context->qp1c_bytes_12,
2609 QP1C_BYTES_12_SQ_RQ_BT_H_M,
2610 QP1C_BYTES_12_SQ_RQ_BT_H_S,
2611 upper_32_bits(dma_handle));
2613 roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
2614 QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
2615 roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
2616 QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
2617 roce_set_bit(context->qp1c_bytes_16,
2618 QP1C_BYTES_16_SIGNALING_TYPE_S,
2619 hr_qp->sq_signal_bits);
2620 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
2622 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
2624 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S,
2627 roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M,
2628 QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head);
2629 roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
2630 QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);
2632 context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba);
2634 roce_set_field(context->qp1c_bytes_28,
2635 QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
2636 QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
2637 upper_32_bits(rq_ba));
2638 roce_set_field(context->qp1c_bytes_28,
2639 QP1C_BYTES_28_RQ_CUR_IDX_M,
2640 QP1C_BYTES_28_RQ_CUR_IDX_S, 0);
2642 roce_set_field(context->qp1c_bytes_32,
2643 QP1C_BYTES_32_RX_CQ_NUM_M,
2644 QP1C_BYTES_32_RX_CQ_NUM_S,
2645 to_hr_cq(ibqp->recv_cq)->cqn);
2646 roce_set_field(context->qp1c_bytes_32,
2647 QP1C_BYTES_32_TX_CQ_NUM_M,
2648 QP1C_BYTES_32_TX_CQ_NUM_S,
2649 to_hr_cq(ibqp->send_cq)->cqn);
2651 context->cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
2653 roce_set_field(context->qp1c_bytes_40,
2654 QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
2655 QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
2656 upper_32_bits(sq_ba));
2657 roce_set_field(context->qp1c_bytes_40,
2658 QP1C_BYTES_40_SQ_CUR_IDX_M,
2659 QP1C_BYTES_40_SQ_CUR_IDX_S, 0);
2661 /* Copy context to QP1C register */
2662 addr = (u32 __iomem *)(hr_dev->reg_base +
2663 ROCEE_QP1C_CFG0_0_REG +
2664 hr_qp->phy_port * sizeof(*context));
2666 writel(le32_to_cpu(context->qp1c_bytes_4), addr);
2667 writel(le32_to_cpu(context->sq_rq_bt_l), addr + 1);
2668 writel(le32_to_cpu(context->qp1c_bytes_12), addr + 2);
2669 writel(le32_to_cpu(context->qp1c_bytes_16), addr + 3);
2670 writel(le32_to_cpu(context->qp1c_bytes_20), addr + 4);
2671 writel(le32_to_cpu(context->cur_rq_wqe_ba_l), addr + 5);
2672 writel(le32_to_cpu(context->qp1c_bytes_28), addr + 6);
2673 writel(le32_to_cpu(context->qp1c_bytes_32), addr + 7);
2674 writel(le32_to_cpu(context->cur_sq_wqe_ba_l), addr + 8);
2675 writel(le32_to_cpu(context->qp1c_bytes_40), addr + 9);
2678 /* Modify QP1C status */
2679 reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
2680 hr_qp->phy_port * sizeof(*context));
2681 tmp = cpu_to_le32(reg_val);
2682 roce_set_field(tmp, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
2683 ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
2684 reg_val = le32_to_cpu(tmp);
2685 roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
2686 hr_qp->phy_port * sizeof(*context), reg_val);
2688 hr_qp->state = new_state;
2689 if (new_state == IB_QPS_RESET) {
2690 hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
2691 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
2692 if (ibqp->send_cq != ibqp->recv_cq)
2693 hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
2710 static bool check_qp_state(enum ib_qp_state cur_state,
2711 enum ib_qp_state new_state)
2713 static const bool sm[][IB_QPS_ERR + 1] = {
2714 [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
2715 [IB_QPS_INIT] = true },
2716 [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
2717 [IB_QPS_INIT] = true,
2718 [IB_QPS_RTR] = true,
2719 [IB_QPS_ERR] = true },
2720 [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
2721 [IB_QPS_RTS] = true,
2722 [IB_QPS_ERR] = true },
2723 [IB_QPS_RTS] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true },
2726 [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
2729 return sm[cur_state][new_state];
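/*
 * Illustrative use of the sm[][] table (not driver code):
 *
 *	check_qp_state(IB_QPS_INIT, IB_QPS_RTR);  // true, transition allowed
 *	check_qp_state(IB_QPS_RTR, IB_QPS_RTR);   // false, rejected below
 */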
2732 static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
2733 int attr_mask, enum ib_qp_state cur_state,
2734 enum ib_qp_state new_state)
2736 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2737 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2738 struct device *dev = &hr_dev->pdev->dev;
2739 struct hns_roce_qp_context *context;
2740 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2741 dma_addr_t dma_handle_2 = 0;
2742 dma_addr_t dma_handle = 0;
2743 __le32 doorbell[2] = {0};
2753 if (!check_qp_state(cur_state, new_state)) {
2754 ibdev_err(ibqp->device,
2755 "not support QP(%u) status from %d to %d\n",
2756 ibqp->qp_num, cur_state, new_state);
2760 context = kzalloc(sizeof(*context), GFP_KERNEL);
2764 /* Search qp buf's mtts */
2765 if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle))
2768 /* Search IRRL's mtts */
2769 mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
2770 hr_qp->qpn, &dma_handle_2);
2771 if (mtts_2 == NULL) {
2772 dev_err(dev, "qp irrl_table find failed\n");
2779 * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
2780 * Optional param: NA
2782 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2783 roce_set_field(context->qpc_bytes_4,
2784 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2785 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2786 to_hr_qp_type(hr_qp->ibqp.qp_type));
2788 roce_set_bit(context->qpc_bytes_4,
2789 QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2790 roce_set_bit(context->qpc_bytes_4,
2791 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2792 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
2793 roce_set_bit(context->qpc_bytes_4,
2794 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2795 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
2797 roce_set_bit(context->qpc_bytes_4,
2798 QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S,
2799 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
2801 roce_set_bit(context->qpc_bytes_4,
2802 QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2803 roce_set_field(context->qpc_bytes_4,
2804 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2805 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2806 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2807 roce_set_field(context->qpc_bytes_4,
2808 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2809 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2810 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2811 roce_set_field(context->qpc_bytes_4,
2812 QP_CONTEXT_QPC_BYTES_4_PD_M,
2813 QP_CONTEXT_QPC_BYTES_4_PD_S,
2814 to_hr_pd(ibqp->pd)->pdn);
2815 hr_qp->access_flags = attr->qp_access_flags;
2816 roce_set_field(context->qpc_bytes_8,
2817 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2818 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2819 to_hr_cq(ibqp->send_cq)->cqn);
2820 roce_set_field(context->qpc_bytes_8,
2821 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2822 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2823 to_hr_cq(ibqp->recv_cq)->cqn);
2826 roce_set_field(context->qpc_bytes_12,
2827 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2828 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2829 to_hr_srq(ibqp->srq)->srqn);
2831 roce_set_field(context->qpc_bytes_12,
2832 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2833 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2835 hr_qp->pkey_index = attr->pkey_index;
2836 roce_set_field(context->qpc_bytes_16,
2837 QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2838 QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2839 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
2840 roce_set_field(context->qpc_bytes_4,
2841 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2842 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2843 to_hr_qp_type(hr_qp->ibqp.qp_type));
2844 roce_set_bit(context->qpc_bytes_4,
2845 QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2846 if (attr_mask & IB_QP_ACCESS_FLAGS) {
2847 roce_set_bit(context->qpc_bytes_4,
2848 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2849 !!(attr->qp_access_flags &
2850 IB_ACCESS_REMOTE_READ));
2851 roce_set_bit(context->qpc_bytes_4,
2852 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2853 !!(attr->qp_access_flags &
2854 IB_ACCESS_REMOTE_WRITE));
2856 roce_set_bit(context->qpc_bytes_4,
2857 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2858 !!(hr_qp->access_flags &
2859 IB_ACCESS_REMOTE_READ));
2860 roce_set_bit(context->qpc_bytes_4,
2861 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2862 !!(hr_qp->access_flags &
2863 IB_ACCESS_REMOTE_WRITE));
2866 roce_set_bit(context->qpc_bytes_4,
2867 QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2868 roce_set_field(context->qpc_bytes_4,
2869 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2870 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2871 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2872 roce_set_field(context->qpc_bytes_4,
2873 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2874 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2875 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2876 roce_set_field(context->qpc_bytes_4,
2877 QP_CONTEXT_QPC_BYTES_4_PD_M,
2878 QP_CONTEXT_QPC_BYTES_4_PD_S,
2879 to_hr_pd(ibqp->pd)->pdn);
2881 roce_set_field(context->qpc_bytes_8,
2882 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2883 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2884 to_hr_cq(ibqp->send_cq)->cqn);
2885 roce_set_field(context->qpc_bytes_8,
2886 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2887 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2888 to_hr_cq(ibqp->recv_cq)->cqn);
2891 roce_set_field(context->qpc_bytes_12,
2892 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2893 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2894 to_hr_srq(ibqp->srq)->srqn);
2895 if (attr_mask & IB_QP_PKEY_INDEX)
2896 roce_set_field(context->qpc_bytes_12,
2897 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2898 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2901 roce_set_field(context->qpc_bytes_12,
2902 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2903 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2906 roce_set_field(context->qpc_bytes_16,
2907 QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2908 QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2909 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
2910 if ((attr_mask & IB_QP_ALT_PATH) ||
2911 (attr_mask & IB_QP_ACCESS_FLAGS) ||
2912 (attr_mask & IB_QP_PKEY_INDEX) ||
2913 (attr_mask & IB_QP_QKEY)) {
2914 dev_err(dev, "INIT2RTR attr_mask error\n");
2918 dmac = (u8 *)attr->ah_attr.roce.dmac;
2920 context->sq_rq_bt_l = cpu_to_le32(dma_handle);
2921 roce_set_field(context->qpc_bytes_24,
2922 QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
2923 QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
2924 upper_32_bits(dma_handle));
2925 roce_set_bit(context->qpc_bytes_24,
2926 QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
2928 roce_set_field(context->qpc_bytes_24,
2929 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
2930 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S,
2931 attr->min_rnr_timer);
2932 context->irrl_ba_l = cpu_to_le32((u32)(dma_handle_2));
2933 roce_set_field(context->qpc_bytes_32,
2934 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M,
2935 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S,
2936 ((u32)(dma_handle_2 >> 32)) &
2937 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M);
2938 roce_set_field(context->qpc_bytes_32,
2939 QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M,
2940 QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0);
2941 roce_set_bit(context->qpc_bytes_32,
2942 QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S,
2944 roce_set_bit(context->qpc_bytes_32,
2945 QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
2946 hr_qp->sq_signal_bits);
2948 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
2950 smac = (u8 *)hr_dev->dev_addr[port];
2951 /* when dmac equals smac or loop_idc is 1, loopback should be used */
2952 if (ether_addr_equal_unaligned(dmac, smac) ||
2953 hr_dev->loop_idc == 0x1)
2954 roce_set_bit(context->qpc_bytes_32,
2955 QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
2957 roce_set_bit(context->qpc_bytes_32,
2958 QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
2959 rdma_ah_get_ah_flags(&attr->ah_attr));
2960 roce_set_field(context->qpc_bytes_32,
2961 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
2962 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
2963 ilog2((unsigned int)attr->max_dest_rd_atomic));
2965 if (attr_mask & IB_QP_DEST_QPN)
2966 roce_set_field(context->qpc_bytes_36,
2967 QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
2968 QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
2971 /* Configure GID index */
2972 port_num = rdma_ah_get_port_num(&attr->ah_attr);
2973 roce_set_field(context->qpc_bytes_36,
2974 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
2975 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S,
2976 hns_get_gid_index(hr_dev,
2980 memcpy(&(context->dmac_l), dmac, 4);
2982 roce_set_field(context->qpc_bytes_44,
2983 QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
2984 QP_CONTEXT_QPC_BYTES_44_DMAC_H_S,
2985 *((u16 *)(&dmac[4])));
2986 roce_set_field(context->qpc_bytes_44,
2987 QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M,
2988 QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S,
2989 rdma_ah_get_static_rate(&attr->ah_attr));
2990 roce_set_field(context->qpc_bytes_44,
2991 QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
2992 QP_CONTEXT_QPC_BYTES_44_HOPLMT_S,
2995 roce_set_field(context->qpc_bytes_48,
2996 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
2997 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S,
2999 roce_set_field(context->qpc_bytes_48,
3000 QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
3001 QP_CONTEXT_QPC_BYTES_48_TCLASS_S,
3002 grh->traffic_class);
3003 roce_set_field(context->qpc_bytes_48,
3004 QP_CONTEXT_QPC_BYTES_48_MTU_M,
3005 QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu);
3007 memcpy(context->dgid, grh->dgid.raw,
3008 sizeof(grh->dgid.raw));
3010 dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l,
3011 roce_get_field(context->qpc_bytes_44,
3012 QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
3013 QP_CONTEXT_QPC_BYTES_44_DMAC_H_S));
3015 roce_set_field(context->qpc_bytes_68,
3016 QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
3017 QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
3019 roce_set_field(context->qpc_bytes_68,
3020 QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
3021 QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
3023 context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba);
3025 roce_set_field(context->qpc_bytes_76,
3026 QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
3027 QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
3028 upper_32_bits(rq_ba));
3029 roce_set_field(context->qpc_bytes_76,
3030 QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
3031 QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);
3033 context->rx_rnr_time = 0;
3035 roce_set_field(context->qpc_bytes_84,
3036 QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M,
3037 QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S,
3039 roce_set_field(context->qpc_bytes_84,
3040 QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M,
3041 QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0);
3043 roce_set_field(context->qpc_bytes_88,
3044 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
3045 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S,
3047 roce_set_bit(context->qpc_bytes_88,
3048 QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0);
3049 roce_set_bit(context->qpc_bytes_88,
3050 QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0);
3051 roce_set_field(context->qpc_bytes_88,
3052 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M,
3053 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S,
3055 roce_set_field(context->qpc_bytes_88,
3056 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M,
3057 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S,
3060 context->dma_length = 0;
3065 roce_set_field(context->qpc_bytes_108,
3066 QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M,
3067 QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0);
3068 roce_set_bit(context->qpc_bytes_108,
3069 QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0);
3070 roce_set_bit(context->qpc_bytes_108,
3071 QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0);
3073 roce_set_field(context->qpc_bytes_112,
3074 QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M,
3075 QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0);
3076 roce_set_field(context->qpc_bytes_112,
3077 QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M,
3078 QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0);
3080 /* Port and SL used by the chip when responding with ACKs */
3081 roce_set_field(context->qpc_bytes_156,
3082 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
3083 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
3085 roce_set_field(context->qpc_bytes_156,
3086 QP_CONTEXT_QPC_BYTES_156_SL_M,
3087 QP_CONTEXT_QPC_BYTES_156_SL_S,
3088 rdma_ah_get_sl(&attr->ah_attr));
3089 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3090 } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
3091 /* If an optional param is present, return an error */
3092 if ((attr_mask & IB_QP_ALT_PATH) ||
3093 (attr_mask & IB_QP_ACCESS_FLAGS) ||
3094 (attr_mask & IB_QP_QKEY) ||
3095 (attr_mask & IB_QP_PATH_MIG_STATE) ||
3096 (attr_mask & IB_QP_CUR_STATE) ||
3097 (attr_mask & IB_QP_MIN_RNR_TIMER)) {
3098 dev_err(dev, "RTR2RTS attr_mask error\n");
3102 context->rx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
3104 roce_set_field(context->qpc_bytes_120,
3105 QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
3106 QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
3107 upper_32_bits(sq_ba));
3109 roce_set_field(context->qpc_bytes_124,
3110 QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
3111 QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0);
3112 roce_set_field(context->qpc_bytes_124,
3113 QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M,
3114 QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0);
3116 roce_set_field(context->qpc_bytes_128,
3117 QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M,
3118 QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S,
3120 roce_set_bit(context->qpc_bytes_128,
3121 QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0);
3122 roce_set_field(context->qpc_bytes_128,
3123 QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M,
3124 QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S,
3126 roce_set_bit(context->qpc_bytes_128,
3127 QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0);
3129 roce_set_field(context->qpc_bytes_132,
3130 QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M,
3131 QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0);
3132 roce_set_field(context->qpc_bytes_132,
3133 QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M,
3134 QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0);
3136 roce_set_field(context->qpc_bytes_136,
3137 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M,
3138 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S,
3140 roce_set_field(context->qpc_bytes_136,
3141 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M,
3142 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S,
3145 roce_set_field(context->qpc_bytes_140,
3146 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M,
3147 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S,
3148 (attr->sq_psn >> SQ_PSN_SHIFT));
3149 roce_set_field(context->qpc_bytes_140,
3150 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M,
3151 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0);
3152 roce_set_bit(context->qpc_bytes_140,
3153 QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
3155 roce_set_field(context->qpc_bytes_148,
3156 QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
3157 QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
3158 roce_set_field(context->qpc_bytes_148,
3159 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
3160 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
3162 roce_set_field(context->qpc_bytes_148,
3163 QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
3164 QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
3166 roce_set_field(context->qpc_bytes_148,
3167 QP_CONTEXT_QPC_BYTES_148_LSN_M,
3168 QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
3170 context->rnr_retry = 0;
3172 roce_set_field(context->qpc_bytes_156,
3173 QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
3174 QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
3176 if (attr->timeout < 0x12) {
3177 dev_info(dev, "ack timeout value (0x%x) must be at least 0x12.\n",
3179 roce_set_field(context->qpc_bytes_156,
3180 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3181 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3184 roce_set_field(context->qpc_bytes_156,
3185 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3186 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3189 roce_set_field(context->qpc_bytes_156,
3190 QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
3191 QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
3193 roce_set_field(context->qpc_bytes_156,
3194 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
3195 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
3197 roce_set_field(context->qpc_bytes_156,
3198 QP_CONTEXT_QPC_BYTES_156_SL_M,
3199 QP_CONTEXT_QPC_BYTES_156_SL_S,
3200 rdma_ah_get_sl(&attr->ah_attr));
3201 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3202 roce_set_field(context->qpc_bytes_156,
3203 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
3204 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S,
3205 ilog2((unsigned int)attr->max_rd_atomic));
3206 roce_set_field(context->qpc_bytes_156,
3207 QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M,
3208 QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0);
3209 context->pkt_use_len = 0;
3211 roce_set_field(context->qpc_bytes_164,
3212 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
3213 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn);
3214 roce_set_field(context->qpc_bytes_164,
3215 QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M,
3216 QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0);
3218 roce_set_field(context->qpc_bytes_168,
3219 QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M,
3220 QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S,
3222 roce_set_field(context->qpc_bytes_168,
3223 QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M,
3224 QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0);
3225 roce_set_field(context->qpc_bytes_168,
3226 QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M,
3227 QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0);
3228 roce_set_bit(context->qpc_bytes_168,
3229 QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0);
3230 roce_set_bit(context->qpc_bytes_168,
3231 QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0);
3232 roce_set_bit(context->qpc_bytes_168,
3233 QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0);
3234 context->sge_use_len = 0;
3236 roce_set_field(context->qpc_bytes_176,
3237 QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M,
3238 QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0);
3239 roce_set_field(context->qpc_bytes_176,
3240 QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M,
3241 QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S,
3243 roce_set_field(context->qpc_bytes_180,
3244 QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M,
3245 QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0);
3246 roce_set_field(context->qpc_bytes_180,
3247 QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
3248 QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
3250 context->tx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
3252 roce_set_field(context->qpc_bytes_188,
3253 QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
3254 QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
3255 upper_32_bits(sq_ba));
3256 roce_set_bit(context->qpc_bytes_188,
3257 QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
3258 roce_set_field(context->qpc_bytes_188,
3259 QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
3260 QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
3264 /* Every state migration must set the QP state field */
3265 roce_set_field(context->qpc_bytes_144,
3266 QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
3267 QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
3269 /* SW passes the context to HW */
3270 ret = hns_roce_v1_qp_modify(hr_dev, to_hns_roce_state(cur_state),
3271 to_hns_roce_state(new_state), context,
3274 dev_err(dev, "hns_roce_qp_modify failed\n");
3279 * rst2init is used instead of init2init in the driver, so HW
3280 * needs to flush the RQ head via the doorbell again
3282 if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
3283 roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
3284 RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
3285 roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
3286 RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
3287 roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
3288 RQ_DOORBELL_U32_8_CMD_S, 1);
3289 roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
3291 if (ibqp->uobject) {
3292 hr_qp->rq.db_reg = hr_dev->reg_base +
3293 hr_dev->odb_offset +
3294 DB_REG_OFFSET * hr_dev->priv_uar.index;
3297 hns_roce_write64_k(doorbell, hr_qp->rq.db_reg);
3300 hr_qp->state = new_state;
3302 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3303 hr_qp->resp_depth = attr->max_dest_rd_atomic;
3304 if (attr_mask & IB_QP_PORT) {
3305 hr_qp->port = attr->port_num - 1;
3306 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
3309 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
3310 hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
3311 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
3312 if (ibqp->send_cq != ibqp->recv_cq)
3313 hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
3326 static int hns_roce_v1_modify_qp(struct ib_qp *ibqp,
3327 const struct ib_qp_attr *attr, int attr_mask,
3328 enum ib_qp_state cur_state,
3329 enum ib_qp_state new_state)
3331 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
3334 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
3335 return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state,
3338 return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state,
3342 static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state)
3345 case HNS_ROCE_QP_STATE_RST:
3346 return IB_QPS_RESET;
3347 case HNS_ROCE_QP_STATE_INIT:
3349 case HNS_ROCE_QP_STATE_RTR:
3351 case HNS_ROCE_QP_STATE_RTS:
3353 case HNS_ROCE_QP_STATE_SQD:
3355 case HNS_ROCE_QP_STATE_ERR:
3362 static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
3363 struct hns_roce_qp *hr_qp,
3364 struct hns_roce_qp_context *hr_context)
3366 struct hns_roce_cmd_mailbox *mailbox;
3369 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3370 if (IS_ERR(mailbox))
3371 return PTR_ERR(mailbox);
3373 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
3374 HNS_ROCE_CMD_QUERY_QP,
3375 HNS_ROCE_CMD_TIMEOUT_MSECS);
3377 memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
3379 dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n");
3381 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3386 static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3388 struct ib_qp_init_attr *qp_init_attr)
3390 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3391 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3392 struct hns_roce_sqp_context context;
3395 mutex_lock(&hr_qp->mutex);
3397 if (hr_qp->state == IB_QPS_RESET) {
3398 qp_attr->qp_state = IB_QPS_RESET;
3402 addr = ROCEE_QP1C_CFG0_0_REG +
3403 hr_qp->port * sizeof(struct hns_roce_sqp_context);
3404 context.qp1c_bytes_4 = cpu_to_le32(roce_read(hr_dev, addr));
3405 context.sq_rq_bt_l = cpu_to_le32(roce_read(hr_dev, addr + 1));
3406 context.qp1c_bytes_12 = cpu_to_le32(roce_read(hr_dev, addr + 2));
3407 context.qp1c_bytes_16 = cpu_to_le32(roce_read(hr_dev, addr + 3));
3408 context.qp1c_bytes_20 = cpu_to_le32(roce_read(hr_dev, addr + 4));
3409 context.cur_rq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 5));
3410 context.qp1c_bytes_28 = cpu_to_le32(roce_read(hr_dev, addr + 6));
3411 context.qp1c_bytes_32 = cpu_to_le32(roce_read(hr_dev, addr + 7));
3412 context.cur_sq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 8));
3413 context.qp1c_bytes_40 = cpu_to_le32(roce_read(hr_dev, addr + 9));
3415 hr_qp->state = roce_get_field(context.qp1c_bytes_4,
3416 QP1C_BYTES_4_QP_STATE_M,
3417 QP1C_BYTES_4_QP_STATE_S);
3418 qp_attr->qp_state = hr_qp->state;
3419 qp_attr->path_mtu = IB_MTU_256;
3420 qp_attr->path_mig_state = IB_MIG_ARMED;
3421 qp_attr->qkey = QKEY_VAL;
3422 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
3423 qp_attr->rq_psn = 0;
3424 qp_attr->sq_psn = 0;
3425 qp_attr->dest_qp_num = 1;
3426 qp_attr->qp_access_flags = 6; /* remote write | remote read */
3428 qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
3429 QP1C_BYTES_20_PKEY_IDX_M,
3430 QP1C_BYTES_20_PKEY_IDX_S);
3431 qp_attr->port_num = hr_qp->port + 1;
3432 qp_attr->sq_draining = 0;
3433 qp_attr->max_rd_atomic = 0;
3434 qp_attr->max_dest_rd_atomic = 0;
3435 qp_attr->min_rnr_timer = 0;
3436 qp_attr->timeout = 0;
3437 qp_attr->retry_cnt = 0;
3438 qp_attr->rnr_retry = 0;
3439 qp_attr->alt_timeout = 0;
3442 qp_attr->cur_qp_state = qp_attr->qp_state;
3443 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3444 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3445 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3446 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3447 qp_attr->cap.max_inline_data = 0;
3448 qp_init_attr->cap = qp_attr->cap;
3449 qp_init_attr->create_flags = 0;
3451 mutex_unlock(&hr_qp->mutex);
3456 static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3458 struct ib_qp_init_attr *qp_init_attr)
3460 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3461 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3462 struct device *dev = &hr_dev->pdev->dev;
3463 struct hns_roce_qp_context *context;
3468 context = kzalloc(sizeof(*context), GFP_KERNEL);
3472 memset(qp_attr, 0, sizeof(*qp_attr));
3473 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
3475 mutex_lock(&hr_qp->mutex);
3477 if (hr_qp->state == IB_QPS_RESET) {
3478 qp_attr->qp_state = IB_QPS_RESET;
3482 ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context);
3484 dev_err(dev, "query qpc error\n");
3489 state = roce_get_field(context->qpc_bytes_144,
3490 QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
3491 QP_CONTEXT_QPC_BYTES_144_QP_STATE_S);
3492 tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state);
3493 if (tmp_qp_state == -1) {
3494 dev_err(dev, "to_ib_qp_state error\n");
3498 hr_qp->state = (u8)tmp_qp_state;
3499 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
3500 qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48,
3501 QP_CONTEXT_QPC_BYTES_48_MTU_M,
3502 QP_CONTEXT_QPC_BYTES_48_MTU_S);
3503 qp_attr->path_mig_state = IB_MIG_ARMED;
3504 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
3505 if (hr_qp->ibqp.qp_type == IB_QPT_UD)
3506 qp_attr->qkey = QKEY_VAL;
3508 qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88,
3509 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
3510 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S);
3511 qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164,
3512 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
3513 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S);
3514 qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36,
3515 QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
3516 QP_CONTEXT_QPC_BYTES_36_DEST_QP_S);
3517 qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4,
3518 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) |
3519 ((roce_get_bit(context->qpc_bytes_4,
3520 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) |
3521 ((roce_get_bit(context->qpc_bytes_4,
3522 QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);
3524 if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
3525 struct ib_global_route *grh =
3526 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
3528 rdma_ah_set_sl(&qp_attr->ah_attr,
3529 roce_get_field(context->qpc_bytes_156,
3530 QP_CONTEXT_QPC_BYTES_156_SL_M,
3531 QP_CONTEXT_QPC_BYTES_156_SL_S));
3532 rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
3534 roce_get_field(context->qpc_bytes_48,
3535 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
3536 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S);
3538 roce_get_field(context->qpc_bytes_36,
3539 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
3540 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S);
3542 roce_get_field(context->qpc_bytes_44,
3543 QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
3544 QP_CONTEXT_QPC_BYTES_44_HOPLMT_S);
3545 grh->traffic_class =
3546 roce_get_field(context->qpc_bytes_48,
3547 QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
3548 QP_CONTEXT_QPC_BYTES_48_TCLASS_S);
3550 memcpy(grh->dgid.raw, context->dgid,
3551 sizeof(grh->dgid.raw));
3554 qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
3555 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
3556 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
3557 qp_attr->port_num = hr_qp->port + 1;
3558 qp_attr->sq_draining = 0;
3559 qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156,
3560 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
3561 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
3562 qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32,
3563 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
3564 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
3565 qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24,
3566 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
3567 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S));
3568 qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156,
3569 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3570 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S));
3571 qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
3572 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
3573 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
3574 qp_attr->rnr_retry = (u8)le32_to_cpu(context->rnr_retry);
3577 qp_attr->cur_qp_state = qp_attr->qp_state;
3578 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3579 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3581 if (!ibqp->uobject) {
3582 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3583 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3585 qp_attr->cap.max_send_wr = 0;
3586 qp_attr->cap.max_send_sge = 0;
3589 qp_init_attr->cap = qp_attr->cap;
3592 mutex_unlock(&hr_qp->mutex);
3597 static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3599 struct ib_qp_init_attr *qp_init_attr)
3601 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3603 return hr_qp->doorbell_qpn <= 1 ?
3604 hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) :
3605 hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
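/*
 * QPs with doorbell_qpn <= 1 are the special SMI/GSI queue pairs whose
 * context lives in the QP1C register block rather than in QPC memory,
 * which is why the query path splits into hns_roce_v1_q_sqp()
 * (register reads) and hns_roce_v1_q_qp() (QUERY_QP mailbox) above.
 */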
3608 int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
3610 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3611 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3612 struct hns_roce_cq *send_cq, *recv_cq;
3615 ret = hns_roce_v1_modify_qp(ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET);
3619 send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
3620 recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
3622 hns_roce_lock_cqs(send_cq, recv_cq);
3625 __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn,
3627 to_hr_srq(hr_qp->ibqp.srq) :
3630 if (send_cq && send_cq != recv_cq)
3631 __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
3633 hns_roce_qp_remove(hr_dev, hr_qp);
3634 hns_roce_unlock_cqs(send_cq, recv_cq);
3636 hns_roce_qp_destroy(hr_dev, hr_qp, udata);
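/*
 * The teardown order above matters: the QP is first forced to RESET,
 * then its stale CQEs are purged while both CQs are held by
 * hns_roce_lock_cqs() so no poll can race with hns_roce_qp_remove(),
 * and only then is the QP memory released.
 */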
3641 static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
3643 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3644 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3645 struct device *dev = &hr_dev->pdev->dev;
3651 * Before freeing the cq buffer, we need to ensure that all outstanding
3652 * CQEs have been written, by checking the CQE counter.
3654 cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
3656 if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
3657 HNS_ROCE_CQE_WCMD_EMPTY_BIT)
3660 cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
3661 if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
3664 msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
3665 if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
3666 dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
3675 static void set_eq_cons_index_v1(struct hns_roce_eq *eq, u32 req_not)
3677 roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
3678 (req_not << eq->log_entries), eq->db_reg);
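/*
 * EQ doorbell layout implied above: the low bits carry the consumer
 * index (masked by HNS_ROCE_V1_CONS_IDX_M) and the request-notification
 * flag sits immediately above the index field, at bit eq->log_entries.
 */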
3681 static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
3682 struct hns_roce_aeqe *aeqe, int qpn)
3684 struct device *dev = &hr_dev->pdev->dev;
3686 dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
3687 switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3688 HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3689 case HNS_ROCE_LWQCE_QPC_ERROR:
3690 dev_warn(dev, "QP %d, QPC error.\n", qpn);
3692 case HNS_ROCE_LWQCE_MTU_ERROR:
3693 dev_warn(dev, "QP %d, MTU error.\n", qpn);
3695 case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
3696 dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
3698 case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
3699 dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
3701 case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
3702 dev_warn(dev, "QP %d, WQE shift error\n", qpn);
3704 case HNS_ROCE_LWQCE_SL_ERROR:
3705 dev_warn(dev, "QP %d, SL error.\n", qpn);
3707 case HNS_ROCE_LWQCE_PORT_ERROR:
3708 dev_warn(dev, "QP %d, port error.\n", qpn);
3715 static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
3716 struct hns_roce_aeqe *aeqe,
3719 struct device *dev = &hr_dev->pdev->dev;
3721 dev_warn(dev, "Local Access Violation Work Queue Error.\n");
3722 switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3723 HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3724 case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
3725 dev_warn(dev, "QP %d, R_key violation.\n", qpn);
3727 case HNS_ROCE_LAVWQE_LENGTH_ERROR:
3728 dev_warn(dev, "QP %d, length error.\n", qpn);
3730 case HNS_ROCE_LAVWQE_VA_ERROR:
3731 dev_warn(dev, "QP %d, VA error.\n", qpn);
3733 case HNS_ROCE_LAVWQE_PD_ERROR:
3734 dev_err(dev, "QP %d, PD error.\n", qpn);
3736 case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
3737 dev_warn(dev, "QP %d, rw acc error.\n", qpn);
3739 case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
3740 dev_warn(dev, "QP %d, key state error.\n", qpn);
3742 case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
3743 dev_warn(dev, "QP %d, MR operation error.\n", qpn);
3750 static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev,
3751 struct hns_roce_aeqe *aeqe,
3754 struct device *dev = &hr_dev->pdev->dev;
3758 qpn = roce_get_field(aeqe->event.queue_event.num,
3759 HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
3760 HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
3761 phy_port = roce_get_field(aeqe->event.queue_event.num,
3762 HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
3763 HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
3765 qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
3767 switch (event_type) {
3768 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
3769 dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
3770 "QP %d, phy_port %d.\n", qpn, phy_port);
3772 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
3773 hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn);
3775 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
3776 hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn);
3782 hns_roce_qp_event(hr_dev, qpn, event_type);
3785 static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
3786 struct hns_roce_aeqe *aeqe,
3789 struct device *dev = &hr_dev->pdev->dev;
3792 cqn = roce_get_field(aeqe->event.queue_event.num,
3793 HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
3794 HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S);
3796 switch (event_type) {
3797 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
3798 dev_warn(dev, "CQ 0x%x access err.\n", cqn);
3800 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
3801 dev_warn(dev, "CQ 0x%x overflow\n", cqn);
3803 case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
3804 dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
3810 hns_roce_cq_event(hr_dev, cqn, event_type);
3813 static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev,
3814 struct hns_roce_aeqe *aeqe)
3816 struct device *dev = &hr_dev->pdev->dev;
3818 switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3819 HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3820 case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
3821 dev_warn(dev, "SDB overflow.\n");
3823 case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
3824 dev_warn(dev, "SDB almost overflow.\n");
3826 case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
3827 dev_warn(dev, "SDB almost empty.\n");
3829 case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
3830 dev_warn(dev, "ODB overflow.\n");
3832 case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
3833 dev_warn(dev, "ODB almost overflow.\n");
3835 case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
3836 dev_warn(dev, "SDB almost empty.\n");
3843 static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry)
3845 unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQE_SIZE;
3847 return (struct hns_roce_aeqe *)((u8 *)
3848 (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
3849 off % HNS_ROCE_BA_SIZE);
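/*
 * EQ buffers are a list of HNS_ROCE_BA_SIZE chunks rather than one
 * contiguous region, so an entry offset is resolved in two steps:
 * pick the chunk (off / HNS_ROCE_BA_SIZE), then index into it
 * (off % HNS_ROCE_BA_SIZE). The same scheme is used for CEQEs below.
 */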
3852 static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq)
3854 struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index);
3856 return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
3857 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
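/*
 * Owner-bit convention (shared by the AEQ check above and the CEQ check
 * below): hardware flips the owner bit of the entries it writes on each
 * pass over the ring, while software tracks its own wrap parity in
 * (cons_index & entries). An entry is new exactly when the two disagree,
 * i.e. when the XOR evaluates non-zero.
 */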
3860 static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
3861 struct hns_roce_eq *eq)
3863 struct device *dev = &hr_dev->pdev->dev;
3864 struct hns_roce_aeqe *aeqe;
3865 int aeqes_found = 0;
3868 while ((aeqe = next_aeqe_sw_v1(eq))) {
3869 /* Make sure we read the AEQ entry after we have checked the
3870 * ownership bit
3871 */
3872 dma_rmb();
3874 dev_dbg(dev, "aeqe = %pK, aeqe->asyn.event_type = 0x%lx\n",
3876 roce_get_field(aeqe->asyn,
3877 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
3878 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
3879 event_type = roce_get_field(aeqe->asyn,
3880 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
3881 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
3882 switch (event_type) {
3883 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
3884 dev_warn(dev, "PATH MIG not supported\n");
3886 case HNS_ROCE_EVENT_TYPE_COMM_EST:
3887 dev_warn(dev, "COMMUNICATION established\n");
3889 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
3890 dev_warn(dev, "SQ DRAINED not supported\n");
3892 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
3893 dev_warn(dev, "PATH MIG failed\n");
3895 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
3896 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
3897 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
3898 hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type);
3900 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
3901 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
3902 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
3903 dev_warn(dev, "SRQ not support!\n");
3905 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
3906 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
3907 case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
3908 hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type);
3910 case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
3911 dev_warn(dev, "port change.\n");
3913 case HNS_ROCE_EVENT_TYPE_MB:
3914 hns_roce_cmd_event(hr_dev,
3915 le16_to_cpu(aeqe->event.cmd.token),
3916 aeqe->event.cmd.status,
3917 le64_to_cpu(aeqe->event.cmd.out_param
3920 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
3921 hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
3924 dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
3925 event_type, eq->eqn, eq->cons_index);
3932 if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1)
3936 set_eq_cons_index_v1(eq, 0);
3941 static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry)
3943 unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQE_SIZE;
3945 return (struct hns_roce_ceqe *)((u8 *)
3946 (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
3947 off % HNS_ROCE_BA_SIZE);
3950 static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq)
3952 struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index);
3954 return (!!(roce_get_bit(ceqe->comp,
3955 HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
3956 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
3959 static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
3960 struct hns_roce_eq *eq)
3962 struct hns_roce_ceqe *ceqe;
3963 int ceqes_found = 0;
3966 while ((ceqe = next_ceqe_sw_v1(eq))) {
3967 /* Make sure we read the CEQ entry after we have checked the
3968 * ownership bit
3969 */
3970 dma_rmb();
3972 cqn = roce_get_field(ceqe->comp,
3973 HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
3974 HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
3975 hns_roce_cq_completion(hr_dev, cqn);
3980 if (eq->cons_index >
3981 EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1)
3985 set_eq_cons_index_v1(eq, 0);
3990 static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr)
3992 struct hns_roce_eq *eq = eq_ptr;
3993 struct hns_roce_dev *hr_dev = eq->hr_dev;
3996 if (eq->type_flag == HNS_ROCE_CEQ)
3997 /* CEQ irq routine, CEQ is a pulse irq, no clearing needed */
3998 int_work = hns_roce_v1_ceq_int(hr_dev, eq);
4000 /* AEQ irq routine, AEQ is a pulse irq, no clearing needed */
4001 int_work = hns_roce_v1_aeq_int(hr_dev, eq);
4003 return IRQ_RETVAL(int_work);
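/*
 * IRQ_RETVAL() turns the "events found" count into IRQ_HANDLED or
 * IRQ_NONE, so a spurious MSI-X vector with no new EQEs is correctly
 * reported to the interrupt core as unhandled.
 */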
4006 static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
4008 struct hns_roce_dev *hr_dev = dev_id;
4009 struct device *dev = &hr_dev->pdev->dev;
4021 * Abnormal interrupt:
4022 * AEQ overflow, ECC multi-bit error and CEQ overflow must clear the
4023 * interrupt: mask the irq, clear the irq, then cancel the mask
4025 aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
4026 tmp = cpu_to_le32(aeshift_val);
4029 if (roce_get_bit(tmp,
4030 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
4031 dev_warn(dev, "AEQ overflow!\n");
4034 caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
4035 tmp = cpu_to_le32(caepaemask_val);
4036 roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
4037 HNS_ROCE_INT_MASK_ENABLE);
4038 caepaemask_val = le32_to_cpu(tmp);
4039 roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
4041 /* Clear int state(INT_WC : write 1 clear) */
4042 caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
4043 tmp = cpu_to_le32(caepaest_val);
4044 roce_set_bit(tmp, ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
4045 caepaest_val = le32_to_cpu(tmp);
4046 roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
4049 caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
4050 tmp = cpu_to_le32(caepaemask_val);
4051 roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
4052 HNS_ROCE_INT_MASK_DISABLE);
4053 caepaemask_val = le32_to_cpu(tmp);
4054 roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
4057 /* CEQ almost overflow */
4058 for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
4059 ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
4060 i * CEQ_REG_OFFSET);
4061 tmp = cpu_to_le32(ceshift_val);
4063 if (roce_get_bit(tmp,
4064 ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
4065 dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
4069 cemask_val = roce_read(hr_dev,
4070 ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4071 i * CEQ_REG_OFFSET);
4072 tmp = cpu_to_le32(cemask_val);
4074 ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
4075 HNS_ROCE_INT_MASK_ENABLE);
4076 cemask_val = le32_to_cpu(tmp);
4077 roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4078 i * CEQ_REG_OFFSET, cemask_val);
4080 /* Clear int state(INT_WC : write 1 clear) */
4081 cealmovf_val = roce_read(hr_dev,
4082 ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
4083 i * CEQ_REG_OFFSET);
4084 tmp = cpu_to_le32(cealmovf_val);
4086 ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
4088 cealmovf_val = le32_to_cpu(tmp);
4089 roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
4090 i * CEQ_REG_OFFSET, cealmovf_val);
4093 cemask_val = roce_read(hr_dev,
4094 ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4095 i * CEQ_REG_OFFSET);
4096 tmp = cpu_to_le32(cemask_val);
4098 ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
4099 HNS_ROCE_INT_MASK_DISABLE);
4100 cemask_val = le32_to_cpu(tmp);
4101 roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4102 i * CEQ_REG_OFFSET, cemask_val);
4106 /* ECC multi-bit error alarm */
4107 dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
4108 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
4109 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
4110 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));
4112 dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
4113 roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
4114 roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
4115 roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));
4117 return IRQ_RETVAL(int_work);
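
/*
 * Editor's sketch, not part of the driver: the handler above repeats a
 * mask -> write-1-to-clear -> unmask sequence for every overflow source.
 * A hypothetical helper like this one (the name roce_w1c_bit and its
 * parameters are invented here; the roce_read/roce_write/roce_set_bit
 * accessors are the ones used above) captures the shape of that sequence
 * for a single-bit status register.
 */
static void __maybe_unused roce_w1c_bit(struct hns_roce_dev *hr_dev,
					u32 mask_reg, u32 mask_bit,
					u32 st_reg, u32 st_bit)
{
	__le32 tmp;
	u32 val;

	/* mask the interrupt while its status bit is being cleared */
	val = roce_read(hr_dev, mask_reg);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, mask_bit, HNS_ROCE_INT_MASK_ENABLE);
	roce_write(hr_dev, mask_reg, le32_to_cpu(tmp));

	/* clear the status bit (INT_WC: writing 1 clears it) */
	val = roce_read(hr_dev, st_reg);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, st_bit, 1);
	roce_write(hr_dev, st_reg, le32_to_cpu(tmp));

	/* cancel the mask again */
	val = roce_read(hr_dev, mask_reg);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, mask_bit, HNS_ROCE_INT_MASK_DISABLE);
	roce_write(hr_dev, mask_reg, le32_to_cpu(tmp));
}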
static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
{
	u32 aemask_val;
	int masken = 0;
	__le32 tmp;
	int i;

	/* AEQ INT */
	aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
	tmp = cpu_to_le32(aemask_val);
	roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
		     masken);
	roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
	aemask_val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);

	/* CEQ INT */
	for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
		/* IRQ mask */
		roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
			   i * CEQ_REG_OFFSET, masken);
	}
}
static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev,
				struct hns_roce_eq *eq)
{
	int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
		      HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
	int i;

	if (!eq->buf_list)
		return;

	for (i = 0; i < npages; ++i)
		dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
				  eq->buf_list[i].buf, eq->buf_list[i].map);

	kfree(eq->buf_list);
}
static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
				  int enable_flag)
{
	void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
	__le32 tmp;
	u32 val;

	val = readl(eqc);
	tmp = cpu_to_le32(val);

	if (enable_flag)
		roce_set_field(tmp,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
			       HNS_ROCE_EQ_STAT_VALID);
	else
		roce_set_field(tmp,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
			       HNS_ROCE_EQ_STAT_INVALID);

	val = le32_to_cpu(tmp);
	writel(val, eqc);
}
static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
				 struct hns_roce_eq *eq)
{
	void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t tmp_dma_addr;
	u32 eqconsindx_val;
	u32 eqcuridx_val;
	u32 eqshift_val;
	__le32 tmp2 = 0;
	__le32 tmp1 = 0;
	__le32 tmp = 0;
	int num_bas;
	int ret;
	int i;

	num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
		   HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;

	if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
		dev_err(dev, "eq buf %d is larger than ba size (%d); need %d bas\n",
			(eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
			num_bas);
		return -EINVAL;
	}

	eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
	if (!eq->buf_list)
		return -ENOMEM;

	for (i = 0; i < num_bas; ++i) {
		eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
							 &tmp_dma_addr,
							 GFP_KERNEL);
		if (!eq->buf_list[i].buf) {
			ret = -ENOMEM;
			goto err_out_free_pages;
		}

		eq->buf_list[i].map = tmp_dma_addr;
	}
	eq->cons_index = 0;
	roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
		       HNS_ROCE_EQ_STAT_INVALID);
	roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
		       eq->log_entries);
	eqshift_val = le32_to_cpu(tmp);
	writel(eqshift_val, eqc);

	/* Configure eq extended address 12~44bit */
	writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);

	/*
	 * Configure eq extended address 45~49 bit.
	 * 44 = 32 + 12: the base address is shifted right by 12 because the
	 * hardware works in 4K pages, and by another 32 to pick out the high
	 * word that is programmed into this register.
	 */
	roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
		       eq->buf_list[0].map >> 44);
	roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
	eqcuridx_val = le32_to_cpu(tmp1);
	writel(eqcuridx_val, eqc + 8);

	/* Configure eq consumer index */
	roce_set_field(tmp2, ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
		       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
	eqconsindx_val = le32_to_cpu(tmp2);
	writel(eqconsindx_val, eqc + 0xc);

	return 0;

err_out_free_pages:
	for (i -= 1; i >= 0; i--)
		dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
				  eq->buf_list[i].map);

	kfree(eq->buf_list);
	return ret;
}
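
/*
 * Editor's worked example of the base-address split programmed above,
 * using an invented, 4K-aligned DMA address map = 0x000123456789a000:
 *   (u32)(map >> 12) = 0x3456789a  -> written to eqc + 4 (address bits 12~43)
 *   map >> 44        = 0x12        -> AEQ_BT_H field (address bits 44 and up)
 */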
static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_eq *eq;
	int irq_num;
	int eq_num;
	int ret;
	int i, j;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	irq_num = eq_num + hr_dev->caps.num_other_vectors;

	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
				     GFP_KERNEL);
	if (!eq_table->eqc_base) {
		ret = -ENOMEM;
		goto err_eqc_base_alloc_fail;
	}

	for (i = 0; i < eq_num; i++) {
		eq = &eq_table->eq[i];
		eq->hr_dev = hr_dev;
		eq->eqn = i;
		eq->irq = hr_dev->irq[i];
		eq->log_page_size = PAGE_SHIFT;

		if (i < hr_dev->caps.num_comp_vectors) {
			/* CEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_CEQC_SHIFT_0_REG +
						CEQ_REG_OFFSET * i;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->db_reg = hr_dev->reg_base +
				     ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
				     CEQ_REG_OFFSET * i;
			eq->entries = hr_dev->caps.ceqe_depth;
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = HNS_ROCE_CEQE_SIZE;
		} else {
			/* AEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->db_reg = hr_dev->reg_base +
				     ROCEE_CAEP_AEQE_CONS_IDX_REG;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = HNS_ROCE_AEQE_SIZE;
		}
	}

	/* Disable irq */
	hns_roce_v1_int_mask_enable(hr_dev);

	/* Configure ce int interval */
	roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_INTERVAL);

	/* Configure ce int burst num */
	roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_BURST_NUM);

	for (i = 0; i < eq_num; i++) {
		ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]);
		if (ret) {
			dev_err(dev, "eq create failed\n");
			goto err_create_eq_fail;
		}
	}

	for (j = 0; j < irq_num; j++) {
		if (j < eq_num)
			ret = request_irq(hr_dev->irq[j],
					  hns_roce_v1_msix_interrupt_eq, 0,
					  hr_dev->irq_names[j],
					  &eq_table->eq[j]);
		else
			ret = request_irq(hr_dev->irq[j],
					  hns_roce_v1_msix_interrupt_abn, 0,
					  hr_dev->irq_names[j], hr_dev);
		if (ret) {
			dev_err(dev, "request irq error!\n");
			goto err_request_irq_fail;
		}
	}

	for (i = 0; i < eq_num; i++)
		hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE);

	return 0;

err_request_irq_fail:
	for (j -= 1; j >= 0; j--)
		free_irq(hr_dev->irq[j], &eq_table->eq[j]);

err_create_eq_fail:
	for (i -= 1; i >= 0; i--)
		hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);

	kfree(eq_table->eqc_base);

err_eqc_base_alloc_fail:
	kfree(eq_table->eq);

	return ret;
}
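
/*
 * Editor's note on the vector layout set up above, with invented example
 * capabilities num_comp_vectors = 32, num_aeq_vectors = 1 and
 * num_other_vectors = 1: eq_num = 33 and irq_num = 34, so irq[0..31]
 * service the CEQs, irq[32] services the AEQ, and irq[33] is routed to
 * the abnormal handler hns_roce_v1_msix_interrupt_abn().
 */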
static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int irq_num;
	int eq_num;
	int i;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	irq_num = eq_num + hr_dev->caps.num_other_vectors;
	for (i = 0; i < eq_num; i++) {
		/* Disable EQ */
		hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE);

		free_irq(hr_dev->irq[i], &eq_table->eq[i]);

		hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
	}
	for (i = eq_num; i < irq_num; i++)
		free_irq(hr_dev->irq[i], hr_dev);

	kfree(eq_table->eqc_base);
	kfree(eq_table->eq);
}
static const struct ib_device_ops hns_roce_v1_dev_ops = {
	.destroy_qp = hns_roce_v1_destroy_qp,
	.poll_cq = hns_roce_v1_poll_cq,
	.post_recv = hns_roce_v1_post_recv,
	.post_send = hns_roce_v1_post_send,
	.query_qp = hns_roce_v1_query_qp,
	.req_notify_cq = hns_roce_v1_req_notify_cq,
};
static const struct hns_roce_hw hns_roce_hw_v1 = {
	.reset = hns_roce_v1_reset,
	.hw_profile = hns_roce_v1_profile,
	.hw_init = hns_roce_v1_init,
	.hw_exit = hns_roce_v1_exit,
	.post_mbox = hns_roce_v1_post_mbox,
	.poll_mbox_done = hns_roce_v1_chk_mbox,
	.set_gid = hns_roce_v1_set_gid,
	.set_mac = hns_roce_v1_set_mac,
	.set_mtu = hns_roce_v1_set_mtu,
	.write_mtpt = hns_roce_v1_write_mtpt,
	.write_cqc = hns_roce_v1_write_cqc,
	.set_hem = hns_roce_v1_set_hem,
	.clear_hem = hns_roce_v1_clear_hem,
	.modify_qp = hns_roce_v1_modify_qp,
	.dereg_mr = hns_roce_v1_dereg_mr,
	.destroy_cq = hns_roce_v1_destroy_cq,
	.init_eq = hns_roce_v1_init_eq_table,
	.cleanup_eq = hns_roce_v1_cleanup_eq_table,
	.hns_roce_dev_ops = &hns_roce_v1_dev_ops,
};
static const struct of_device_id hns_roce_of_match[] = {
	{ .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
	{},
};
MODULE_DEVICE_TABLE(of, hns_roce_of_match);
static const struct acpi_device_id hns_roce_acpi_match[] = {
	{ "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
	{},
};
MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
static struct
platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
{
	struct device *dev;

	/* get the 'device' corresponding to the matching 'fwnode' */
	dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);

	/* get the platform device */
	return dev ? to_platform_device(dev) : NULL;
}
static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct platform_device *pdev = NULL;
	struct net_device *netdev = NULL;
	struct device_node *net_node;
	int port_cnt = 0;
	u8 phy_port;
	int ret;
	int i;

	/* check if we are compatible with the underlying SoC */
	if (dev_of_node(dev)) {
		const struct of_device_id *of_id;

		of_id = of_match_node(hns_roce_of_match, dev->of_node);
		if (!of_id) {
			dev_err(dev, "device is not compatible!\n");
			return -ENXIO;
		}
		hr_dev->hw = (const struct hns_roce_hw *)of_id->data;
		if (!hr_dev->hw) {
			dev_err(dev, "couldn't get H/W specific DT data!\n");
			return -ENXIO;
		}
	} else if (is_acpi_device_node(dev->fwnode)) {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
		if (!acpi_id) {
			dev_err(dev, "device is not compatible!\n");
			return -ENXIO;
		}
		hr_dev->hw = (const struct hns_roce_hw *)acpi_id->driver_data;
		if (!hr_dev->hw) {
			dev_err(dev, "couldn't get H/W specific ACPI data!\n");
			return -ENXIO;
		}
	} else {
		dev_err(dev, "can't read compatibility data from DT or ACPI\n");
		return -ENXIO;
	}

	/* get the mapped register base address */
	hr_dev->reg_base = devm_platform_ioremap_resource(hr_dev->pdev, 0);
	if (IS_ERR(hr_dev->reg_base))
		return PTR_ERR(hr_dev->reg_base);

	/* read the node_guid of IB device from the DT or ACPI */
	ret = device_property_read_u8_array(dev, "node-guid",
					    (u8 *)&hr_dev->ib_dev.node_guid,
					    GUID_LEN);
	if (ret) {
		dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
		return ret;
	}
	/* get the RoCE associated ethernet ports or netdevices */
	for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
		if (dev_of_node(dev)) {
			net_node = of_parse_phandle(dev->of_node, "eth-handle",
						    i);
			if (!net_node)
				continue;
			pdev = of_find_device_by_node(net_node);
		} else if (is_acpi_device_node(dev->fwnode)) {
			struct fwnode_reference_args args;

			ret = acpi_node_get_property_reference(dev->fwnode,
							       "eth-handle",
							       i, &args);
			if (ret)
				continue;
			pdev = hns_roce_find_pdev(args.fwnode);
		} else {
			dev_err(dev, "cannot read data from DT or ACPI\n");
			return -ENXIO;
		}

		if (pdev) {
			netdev = platform_get_drvdata(pdev);
			phy_port = (u8)i;
			if (netdev) {
				hr_dev->iboe.netdevs[port_cnt] = netdev;
				hr_dev->iboe.phy_port[port_cnt] = phy_port;
			} else {
				dev_err(dev, "no netdev found with pdev %s\n",
					pdev->name);
				return -ENODEV;
			}
			port_cnt++;
		}
	}

	if (port_cnt == 0) {
		dev_err(dev, "unable to get eth-handle for available ports!\n");
		return -EINVAL;
	}

	hr_dev->caps.num_ports = port_cnt;

	/* cmd issue mode: 0 is poll, 1 is event */
	hr_dev->cmd_mod = 1;
	hr_dev->loop_idc = 0;
	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
	hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;

	/* read the interrupt names from the DT or ACPI */
	ret = device_property_read_string_array(dev, "interrupt-names",
						hr_dev->irq_names,
						HNS_ROCE_V1_MAX_IRQ_NUM);
	if (ret < 0) {
		dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
		return ret;
	}

	/* fetch the interrupt numbers */
	for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
		hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
		if (hr_dev->irq[i] <= 0)
			return -EINVAL;
	}

	return 0;
}
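
/*
 * Editor's sketch of the firmware description hns_roce_get_cfg() expects.
 * The property names (compatible, node-guid, eth-handle, interrupt-names)
 * come from the code above; every value in this fragment is invented for
 * illustration and does not describe real hardware:
 *
 *	roce@c4000000 {
 *		compatible = "hisilicon,hns-roce-v1";
 *		reg = <0x0 0xc4000000 0x0 0x100000>;
 *		node-guid = [00 9a cd 00 00 01 02 03];
 *		eth-handle = <&eth2 &eth3 &eth4 &eth5>;
 *		interrupt-names = "hns-roce-comp-0", "hns-roce-aeq",
 *				  "hns-roce-abnormal";
 *		interrupts = <...>;
 *	};
 */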
/**
 * hns_roce_probe - RoCE driver entrance
 * @pdev: pointer to platform device
 * Return: 0 on success, or a negative errno on failure.
 */
static int hns_roce_probe(struct platform_device *pdev)
{
	int ret;
	struct hns_roce_dev *hr_dev;
	struct device *dev = &pdev->dev;

	hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
	if (!hr_dev)
		return -ENOMEM;

	hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL);
	if (!hr_dev->priv) {
		ret = -ENOMEM;
		goto error_failed_kzalloc;
	}

	hr_dev->pdev = pdev;
	hr_dev->dev = dev;
	platform_set_drvdata(pdev, hr_dev);

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
		dev_err(dev, "No usable DMA addressing mode\n");
		ret = -EIO;
		goto error_failed_get_cfg;
	}

	ret = hns_roce_get_cfg(hr_dev);
	if (ret) {
		dev_err(dev, "Get Configuration failed!\n");
		goto error_failed_get_cfg;
	}

	ret = hns_roce_init(hr_dev);
	if (ret) {
		dev_err(dev, "RoCE engine init failed!\n");
		goto error_failed_get_cfg;
	}

	return 0;

error_failed_get_cfg:
	kfree(hr_dev->priv);

error_failed_kzalloc:
	ib_dealloc_device(&hr_dev->ib_dev);

	return ret;
}
/**
 * hns_roce_remove - remove RoCE device
 * @pdev: pointer to platform device
 */
static int hns_roce_remove(struct platform_device *pdev)
{
	struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);

	hns_roce_exit(hr_dev);
	kfree(hr_dev->priv);
	ib_dealloc_device(&hr_dev->ib_dev);

	return 0;
}
static struct platform_driver hns_roce_driver = {
	.probe = hns_roce_probe,
	.remove = hns_roce_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = hns_roce_of_match,
		.acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
	},
};

module_platform_driver(hns_roce_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");