2 * Copyright (c) 2016 Hisilicon Limited.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/platform_device.h>
34 #include <linux/acpi.h>
35 #include <linux/etherdevice.h>
36 #include <linux/interrupt.h>
38 #include <linux/of_platform.h>
39 #include <rdma/ib_umem.h>
40 #include "hns_roce_common.h"
41 #include "hns_roce_device.h"
42 #include "hns_roce_cmd.h"
43 #include "hns_roce_hem.h"
44 #include "hns_roce_hw_v1.h"
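/* Helpers to fill WQE segments; all fields are written in little-endian */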
46 static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
48 dseg->lkey = cpu_to_le32(sg->lkey);
49 dseg->addr = cpu_to_le64(sg->addr);
50 dseg->len = cpu_to_le32(sg->length);
53 static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
56 rseg->raddr = cpu_to_le64(remote_addr);
57 rseg->rkey = cpu_to_le32(rkey);
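/*
 * Post a chain of send WRs on a GSI (UD) or RC QP: one WQE is built per
 * WR at the next SQ slot, then the SQ doorbell is rung once with the new
 * head, SL, physical port and doorbell QPN.
 */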
61 static int hns_roce_v1_post_send(struct ib_qp *ibqp,
62 const struct ib_send_wr *wr,
63 const struct ib_send_wr **bad_wr)
65 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
66 struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
67 struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL;
68 struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
69 struct hns_roce_wqe_data_seg *dseg = NULL;
70 struct hns_roce_qp *qp = to_hr_qp(ibqp);
71 struct device *dev = &hr_dev->pdev->dev;
72 struct hns_roce_sq_db sq_db = {};
73 int ps_opcode = 0, i = 0;
74 unsigned long flags = 0;
83 if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
84 ibqp->qp_type != IB_QPT_RC)) {
85 dev_err(dev, "unsupported QP type\n");
90 spin_lock_irqsave(&qp->sq.lock, flags);
92 for (nreq = 0; wr; ++nreq, wr = wr->next) {
93 if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
99 wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
101 if (unlikely(wr->num_sge > qp->sq.max_gs)) {
102 dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
103 wr->num_sge, qp->sq.max_gs);
109 wqe = hns_roce_get_send_wqe(qp, wqe_idx);
110 qp->sq.wrid[wqe_idx] = wr->wr_id;
112 /* Build UD (GSI) and RC WQEs separately */
113 if (ibqp->qp_type == IB_QPT_GSI) {
115 roce_set_field(ud_sq_wqe->dmac_h,
116 UD_SEND_WQE_U32_4_DMAC_0_M,
117 UD_SEND_WQE_U32_4_DMAC_0_S,
119 roce_set_field(ud_sq_wqe->dmac_h,
120 UD_SEND_WQE_U32_4_DMAC_1_M,
121 UD_SEND_WQE_U32_4_DMAC_1_S,
123 roce_set_field(ud_sq_wqe->dmac_h,
124 UD_SEND_WQE_U32_4_DMAC_2_M,
125 UD_SEND_WQE_U32_4_DMAC_2_S,
127 roce_set_field(ud_sq_wqe->dmac_h,
128 UD_SEND_WQE_U32_4_DMAC_3_M,
129 UD_SEND_WQE_U32_4_DMAC_3_S,
132 roce_set_field(ud_sq_wqe->u32_8,
133 UD_SEND_WQE_U32_8_DMAC_4_M,
134 UD_SEND_WQE_U32_8_DMAC_4_S,
136 roce_set_field(ud_sq_wqe->u32_8,
137 UD_SEND_WQE_U32_8_DMAC_5_M,
138 UD_SEND_WQE_U32_8_DMAC_5_S,
141 smac = (u8 *)hr_dev->dev_addr[qp->port];
142 loopback = ether_addr_equal_unaligned(ah->av.mac,
144 roce_set_bit(ud_sq_wqe->u32_8,
145 UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
148 roce_set_field(ud_sq_wqe->u32_8,
149 UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
150 UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
151 HNS_ROCE_WQE_OPCODE_SEND);
152 roce_set_field(ud_sq_wqe->u32_8,
153 UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M,
154 UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S,
156 roce_set_bit(ud_sq_wqe->u32_8,
157 UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S,
160 ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ?
161 cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
162 (wr->send_flags & IB_SEND_SOLICITED ?
163 cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
164 ((wr->opcode == IB_WR_SEND_WITH_IMM) ?
165 cpu_to_le32(HNS_ROCE_WQE_IMM) : 0);
167 roce_set_field(ud_sq_wqe->u32_16,
168 UD_SEND_WQE_U32_16_DEST_QP_M,
169 UD_SEND_WQE_U32_16_DEST_QP_S,
170 ud_wr(wr)->remote_qpn);
171 roce_set_field(ud_sq_wqe->u32_16,
172 UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M,
173 UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S,
176 roce_set_field(ud_sq_wqe->u32_36,
177 UD_SEND_WQE_U32_36_FLOW_LABEL_M,
178 UD_SEND_WQE_U32_36_FLOW_LABEL_S,
180 roce_set_field(ud_sq_wqe->u32_36,
181 UD_SEND_WQE_U32_36_PRIORITY_M,
182 UD_SEND_WQE_U32_36_PRIORITY_S,
184 roce_set_field(ud_sq_wqe->u32_36,
185 UD_SEND_WQE_U32_36_SGID_INDEX_M,
186 UD_SEND_WQE_U32_36_SGID_INDEX_S,
187 hns_get_gid_index(hr_dev, qp->phy_port,
190 roce_set_field(ud_sq_wqe->u32_40,
191 UD_SEND_WQE_U32_40_HOP_LIMIT_M,
192 UD_SEND_WQE_U32_40_HOP_LIMIT_S,
194 roce_set_field(ud_sq_wqe->u32_40,
195 UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
196 UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S,
199 memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);
202 cpu_to_le32((u32)wr->sg_list[0].addr);
204 cpu_to_le32((wr->sg_list[0].addr) >> 32);
206 cpu_to_le32(wr->sg_list[0].lkey);
209 cpu_to_le32((u32)wr->sg_list[1].addr);
211 cpu_to_le32((wr->sg_list[1].addr) >> 32);
213 cpu_to_le32(wr->sg_list[1].lkey);
214 } else if (ibqp->qp_type == IB_QPT_RC) {
218 memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
219 for (i = 0; i < wr->num_sge; i++)
220 tmp_len += wr->sg_list[i].length;
223 cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len);
228 switch (wr->opcode) {
229 case IB_WR_SEND_WITH_IMM:
230 case IB_WR_RDMA_WRITE_WITH_IMM:
231 ctrl->imm_data = wr->ex.imm_data;
233 case IB_WR_SEND_WITH_INV:
235 cpu_to_le32(wr->ex.invalidate_rkey);
242 /* Ctrl field: set the signaled, solicited, immediate and fence flags */
243 /* SO (strong ordering) is left for conforming application scenarios */
244 ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
245 cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
246 (wr->send_flags & IB_SEND_SOLICITED ?
247 cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
248 ((wr->opcode == IB_WR_SEND_WITH_IMM ||
249 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ?
250 cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) |
251 (wr->send_flags & IB_SEND_FENCE ?
252 (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);
254 wqe += sizeof(struct hns_roce_wqe_ctrl_seg);
256 switch (wr->opcode) {
257 case IB_WR_RDMA_READ:
258 ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
259 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
262 case IB_WR_RDMA_WRITE:
263 case IB_WR_RDMA_WRITE_WITH_IMM:
264 ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
265 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
269 case IB_WR_SEND_WITH_INV:
270 case IB_WR_SEND_WITH_IMM:
271 ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
273 case IB_WR_LOCAL_INV:
275 case IB_WR_ATOMIC_CMP_AND_SWP:
276 case IB_WR_ATOMIC_FETCH_AND_ADD:
279 ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
282 ctrl->flag |= cpu_to_le32(ps_opcode);
283 wqe += sizeof(struct hns_roce_wqe_raddr_seg);
286 if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
287 if (le32_to_cpu(ctrl->msg_length) >
288 hr_dev->caps.max_sq_inline) {
291 dev_err(dev, "inline len(1-%d)=%d, illegal",
293 hr_dev->caps.max_sq_inline);
296 for (i = 0; i < wr->num_sge; i++) {
297 memcpy(wqe, ((void *) (uintptr_t)
298 wr->sg_list[i].addr),
299 wr->sg_list[i].length);
300 wqe += wr->sg_list[i].length;
302 ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE);
305 for (i = 0; i < wr->num_sge; i++)
306 set_data_seg(dseg + i, wr->sg_list + i);
308 ctrl->flag |= cpu_to_le32(wr->num_sge <<
309 HNS_ROCE_WQE_SGE_NUM_BIT);
321 roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
322 SQ_DOORBELL_U32_4_SQ_HEAD_S,
323 (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
324 roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
325 SQ_DOORBELL_U32_4_SL_S, qp->sl);
326 roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
327 SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
328 roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
329 SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
330 roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);
332 doorbell[0] = sq_db.u32_4;
333 doorbell[1] = sq_db.u32_8;
335 hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
338 spin_unlock_irqrestore(&qp->sq.lock, flags);
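/*
 * Post a chain of receive WRs: write the SGE count and data segments into
 * each RQ WQE and advance the RQ head. For the GSI QP the new head is
 * written into the QP1C config register; for other QPs an RQ doorbell is
 * rung.
 */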
343 static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
344 const struct ib_recv_wr *wr,
345 const struct ib_recv_wr **bad_wr)
347 struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
348 struct hns_roce_wqe_data_seg *scat = NULL;
349 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
350 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
351 struct device *dev = &hr_dev->pdev->dev;
352 struct hns_roce_rq_db rq_db = {};
353 __le32 doorbell[2] = {0};
354 unsigned long flags = 0;
355 unsigned int wqe_idx;
361 spin_lock_irqsave(&hr_qp->rq.lock, flags);
363 for (nreq = 0; wr; ++nreq, wr = wr->next) {
364 if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
365 hr_qp->ibqp.recv_cq)) {
371 wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
373 if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
374 dev_err(dev, "rq:num_sge=%d > qp->rq.max_gs=%d\n",
375 wr->num_sge, hr_qp->rq.max_gs);
381 ctrl = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
383 roce_set_field(ctrl->rwqe_byte_12,
384 RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
385 RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S,
388 scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);
390 for (i = 0; i < wr->num_sge; i++)
391 set_data_seg(scat + i, wr->sg_list + i);
393 hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
398 hr_qp->rq.head += nreq;
402 if (ibqp->qp_type == IB_QPT_GSI) {
405 /* Software updates the GSI RQ head pointer */
406 reg_val = roce_read(to_hr_dev(ibqp->device),
407 ROCEE_QP1C_CFG3_0_REG +
408 QP1C_CFGN_OFFSET * hr_qp->phy_port);
409 tmp = cpu_to_le32(reg_val);
411 ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
412 ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
414 reg_val = le32_to_cpu(tmp);
415 roce_write(to_hr_dev(ibqp->device),
416 ROCEE_QP1C_CFG3_0_REG +
417 QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
419 roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
420 RQ_DOORBELL_U32_4_RQ_HEAD_S,
422 roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
423 RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
424 roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
425 RQ_DOORBELL_U32_8_CMD_S, 1);
426 roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
429 doorbell[0] = rq_db.u32_4;
430 doorbell[1] = rq_db.u32_8;
432 hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
435 spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
440 static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
441 int sdb_mode, int odb_mode)
446 val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
447 tmp = cpu_to_le32(val);
448 roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
449 roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
450 val = le32_to_cpu(tmp);
451 roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
454 static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
460 /* Configure SDB/ODB extend mode */
461 val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
462 tmp = cpu_to_le32(val);
463 roce_set_bit(tmp, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
464 roce_set_bit(tmp, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
465 val = le32_to_cpu(tmp);
466 roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
469 static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
476 val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
477 tmp = cpu_to_le32(val);
478 roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
479 ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
480 roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
481 ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
482 val = le32_to_cpu(tmp);
483 roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
486 static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
493 val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
494 tmp = cpu_to_le32(val);
495 roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
496 ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
497 roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
498 ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
499 val = le32_to_cpu(tmp);
500 roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
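/*
 * Program the extended SDB: thresholds, the DMA base address split into a
 * low part (written as addr >> 12) and a high part (addr >> 44), and the
 * buffer depth as a log2 value.
 */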
503 static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
506 struct hns_roce_v1_priv *priv = hr_dev->priv;
507 struct hns_roce_db_table *db = &priv->db_table;
508 struct device *dev = &hr_dev->pdev->dev;
509 dma_addr_t sdb_dma_addr;
513 /* Configure extend SDB threshold */
514 roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
515 roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);
517 /* Configure extend SDB base addr */
518 sdb_dma_addr = db->ext_db->sdb_buf_list->map;
519 roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12));
521 /* Configure extend SDB depth */
522 val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
523 tmp = cpu_to_le32(val);
524 roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
525 ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
526 db->ext_db->esdb_dep);
528 * 44 = 32 + 12. The address written to hardware is shifted right by 12
529 * because 4K pages are used, and by another 32 to extract the high
530 * 32 bits programmed into this register.
532 roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
533 ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
534 val = le32_to_cpu(tmp);
535 roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);
537 dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
538 dev_dbg(dev, "ext SDB threshold: empty: 0x%x, full: 0x%x\n",
539 ext_sdb_alept, ext_sdb_alful);
542 static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
545 struct hns_roce_v1_priv *priv = hr_dev->priv;
546 struct hns_roce_db_table *db = &priv->db_table;
547 struct device *dev = &hr_dev->pdev->dev;
548 dma_addr_t odb_dma_addr;
552 /* Configure extend ODB threshold */
553 roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
554 roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);
556 /* Configure extend ODB base addr */
557 odb_dma_addr = db->ext_db->odb_buf_list->map;
558 roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12));
560 /* Configure extend ODB depth */
561 val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
562 tmp = cpu_to_le32(val);
563 roce_set_field(tmp, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
564 ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
565 db->ext_db->eodb_dep);
566 roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
567 ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
568 db->ext_db->eodb_dep);
569 val = le32_to_cpu(tmp);
570 roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);
572 dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
573 dev_dbg(dev, "ext ODB threshold: empty: 0x%x, full: 0x%x\n",
574 ext_odb_alept, ext_odb_alful);
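/*
 * Allocate coherent buffers for the extended SQ and "others" doorbells,
 * program their registers, and select extended doorbell mode per queue
 * type. On failure the buffers allocated so far are freed in reverse
 * order.
 */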
577 static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
580 struct hns_roce_v1_priv *priv = hr_dev->priv;
581 struct hns_roce_db_table *db = &priv->db_table;
582 struct device *dev = &hr_dev->pdev->dev;
583 dma_addr_t sdb_dma_addr;
584 dma_addr_t odb_dma_addr;
587 db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
592 db->ext_db->sdb_buf_list = kmalloc(
593 sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL);
594 if (!db->ext_db->sdb_buf_list) {
596 goto ext_sdb_buf_fail_out;
599 db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev,
600 HNS_ROCE_V1_EXT_SDB_SIZE,
601 &sdb_dma_addr, GFP_KERNEL);
602 if (!db->ext_db->sdb_buf_list->buf) {
604 goto alloc_sq_db_buf_fail;
606 db->ext_db->sdb_buf_list->map = sdb_dma_addr;
608 db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH);
609 hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT,
610 HNS_ROCE_V1_EXT_SDB_ALFUL);
612 hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT,
613 HNS_ROCE_V1_SDB_ALFUL);
616 db->ext_db->odb_buf_list = kmalloc(
617 sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL);
618 if (!db->ext_db->odb_buf_list) {
620 goto ext_odb_buf_fail_out;
623 db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev,
624 HNS_ROCE_V1_EXT_ODB_SIZE,
625 &odb_dma_addr, GFP_KERNEL);
626 if (!db->ext_db->odb_buf_list->buf) {
628 goto alloc_otr_db_buf_fail;
630 db->ext_db->odb_buf_list->map = odb_dma_addr;
632 db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH);
633 hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT,
634 HNS_ROCE_V1_EXT_ODB_ALFUL);
636 hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT,
637 HNS_ROCE_V1_ODB_ALFUL);
639 hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod);
643 alloc_otr_db_buf_fail:
644 kfree(db->ext_db->odb_buf_list);
646 ext_odb_buf_fail_out:
648 dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
649 db->ext_db->sdb_buf_list->buf,
650 db->ext_db->sdb_buf_list->map);
653 alloc_sq_db_buf_fail:
655 kfree(db->ext_db->sdb_buf_list);
657 ext_sdb_buf_fail_out:
662 static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
665 struct device *dev = &hr_dev->pdev->dev;
666 struct ib_qp_init_attr init_attr;
669 memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
670 init_attr.qp_type = IB_QPT_RC;
671 init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
672 init_attr.cap.max_recv_wr = HNS_ROCE_MIN_WQE_NUM;
673 init_attr.cap.max_send_wr = HNS_ROCE_MIN_WQE_NUM;
675 qp = hns_roce_create_qp(pd, &init_attr, NULL);
677 dev_err(dev, "Create loop qp for mr free failed!");
684 static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
686 struct hns_roce_v1_priv *priv = hr_dev->priv;
687 struct hns_roce_free_mr *free_mr = &priv->free_mr;
688 struct hns_roce_caps *caps = &hr_dev->caps;
689 struct ib_device *ibdev = &hr_dev->ib_dev;
690 struct device *dev = &hr_dev->pdev->dev;
691 struct ib_cq_init_attr cq_init_attr;
692 struct ib_qp_attr attr = { 0 };
693 struct hns_roce_qp *hr_qp;
697 __be64 subnet_prefix;
701 u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
706 /* Reserve a CQ shared by the loopback QPs */
707 cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2;
708 cq_init_attr.comp_vector = 0;
710 cq = rdma_zalloc_drv_obj(ibdev, ib_cq);
714 ret = hns_roce_create_cq(cq, &cq_init_attr, NULL);
716 dev_err(dev, "Create cq for reserved loop qp failed!");
717 goto alloc_cq_failed;
719 free_mr->mr_free_cq = to_hr_cq(cq);
720 free_mr->mr_free_cq->ib_cq.device = &hr_dev->ib_dev;
721 free_mr->mr_free_cq->ib_cq.uobject = NULL;
722 free_mr->mr_free_cq->ib_cq.comp_handler = NULL;
723 free_mr->mr_free_cq->ib_cq.event_handler = NULL;
724 free_mr->mr_free_cq->ib_cq.cq_context = NULL;
725 atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);
727 pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
730 goto alloc_mem_failed;
734 ret = hns_roce_alloc_pd(pd, NULL);
736 goto alloc_pd_failed;
738 free_mr->mr_free_pd = to_hr_pd(pd);
739 free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
740 free_mr->mr_free_pd->ibpd.uobject = NULL;
741 free_mr->mr_free_pd->ibpd.__internal_mr = NULL;
742 atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);
744 attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
746 attr.min_rnr_timer = 0;
747 /* Disable read ability */
748 attr.max_dest_rd_atomic = 0;
749 attr.max_rd_atomic = 0;
750 /* Use arbitrary values as rq_psn and sq_psn */
751 attr.rq_psn = 0x0808;
752 attr.sq_psn = 0x0808;
756 attr.path_mtu = IB_MTU_256;
757 attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
758 rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
759 rdma_ah_set_static_rate(&attr.ah_attr, 3);
761 subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
762 for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
763 phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
764 (i % HNS_ROCE_MAX_PORTS);
765 sl = i / HNS_ROCE_MAX_PORTS;
767 for (j = 0; j < caps->num_ports; j++) {
768 if (hr_dev->iboe.phy_port[j] == phy_port) {
778 free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
779 if (!free_mr->mr_free_qp[i]) {
780 dev_err(dev, "Create loop qp failed!\n");
782 goto create_lp_qp_failed;
784 hr_qp = free_mr->mr_free_qp[i];
787 hr_qp->phy_port = phy_port;
788 hr_qp->ibqp.qp_type = IB_QPT_RC;
789 hr_qp->ibqp.device = &hr_dev->ib_dev;
790 hr_qp->ibqp.uobject = NULL;
791 atomic_set(&hr_qp->ibqp.usecnt, 0);
793 hr_qp->ibqp.recv_cq = cq;
794 hr_qp->ibqp.send_cq = cq;
796 rdma_ah_set_port_num(&attr.ah_attr, port + 1);
797 rdma_ah_set_sl(&attr.ah_attr, sl);
798 attr.port_num = port + 1;
800 attr.dest_qp_num = hr_qp->qpn;
801 memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
802 hr_dev->dev_addr[port],
805 memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
806 memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
807 memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
811 rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);
813 ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
814 IB_QPS_RESET, IB_QPS_INIT);
816 dev_err(dev, "modify qp failed(%d)!\n", ret);
817 goto create_lp_qp_failed;
820 ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN,
821 IB_QPS_INIT, IB_QPS_RTR);
823 dev_err(dev, "modify qp failed(%d)!\n", ret);
824 goto create_lp_qp_failed;
827 ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
828 IB_QPS_RTR, IB_QPS_RTS);
830 dev_err(dev, "modify qp failed(%d)!\n", ret);
831 goto create_lp_qp_failed;
838 for (i -= 1; i >= 0; i--) {
839 hr_qp = free_mr->mr_free_qp[i];
840 if (hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL))
841 dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
844 hns_roce_dealloc_pd(pd, NULL);
850 hns_roce_destroy_cq(cq, NULL);
856 static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
858 struct hns_roce_v1_priv *priv = hr_dev->priv;
859 struct hns_roce_free_mr *free_mr = &priv->free_mr;
860 struct device *dev = &hr_dev->pdev->dev;
861 struct hns_roce_qp *hr_qp;
865 for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
866 hr_qp = free_mr->mr_free_qp[i];
870 ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL);
872 dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
876 hns_roce_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
877 kfree(&free_mr->mr_free_cq->ib_cq);
878 hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
879 kfree(&free_mr->mr_free_pd->ibpd);
882 static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
884 struct hns_roce_v1_priv *priv = hr_dev->priv;
885 struct hns_roce_db_table *db = &priv->db_table;
886 struct device *dev = &hr_dev->pdev->dev;
893 memset(db, 0, sizeof(*db));
895 /* Default DB mode */
896 sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE;
897 odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE;
898 sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE;
899 odb_evt_mod = HNS_ROCE_ODB_POLL_MODE;
901 db->sdb_ext_mod = sdb_ext_mod;
902 db->odb_ext_mod = odb_ext_mod;
905 ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod);
907 dev_err(dev, "Failed in extend DB configuration.\n");
911 hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod);
916 static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
918 struct hns_roce_recreate_lp_qp_work *lp_qp_work;
919 struct hns_roce_dev *hr_dev;
921 lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
923 hr_dev = to_hr_dev(lp_qp_work->ib_dev);
925 hns_roce_v1_release_lp_qp(hr_dev);
927 if (hns_roce_v1_rsv_lp_qp(hr_dev))
928 dev_err(&hr_dev->pdev->dev, "create reserved qp failed\n");
930 if (lp_qp_work->comp_flag)
931 complete(lp_qp_work->comp);
936 static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
938 long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;
939 struct hns_roce_v1_priv *priv = hr_dev->priv;
940 struct hns_roce_free_mr *free_mr = &priv->free_mr;
941 struct hns_roce_recreate_lp_qp_work *lp_qp_work;
942 struct device *dev = &hr_dev->pdev->dev;
943 struct completion comp;
945 lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
950 INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);
952 lp_qp_work->ib_dev = &(hr_dev->ib_dev);
953 lp_qp_work->comp = ∁
954 lp_qp_work->comp_flag = 1;
956 init_completion(lp_qp_work->comp);
958 queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));
961 if (try_wait_for_completion(&comp))
963 msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
964 end -= HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE;
967 lp_qp_work->comp_flag = 0;
968 if (try_wait_for_completion(&comp))
971 dev_warn(dev, "recreate lp qp failed: 20s timeout, returning failure!\n");
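/*
 * Post a zero-length RDMA WRITE on a loopback QP; its completion is used
 * as a fence to show the hardware has finished with the MR being freed.
 */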
975 static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
977 struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
978 struct device *dev = &hr_dev->pdev->dev;
979 struct ib_send_wr send_wr;
980 const struct ib_send_wr *bad_wr;
983 memset(&send_wr, 0, sizeof(send_wr));
986 send_wr.send_flags = 0;
987 send_wr.sg_list = NULL;
988 send_wr.wr_id = (unsigned long long)&send_wr;
989 send_wr.opcode = IB_WR_RDMA_WRITE;
991 ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
993 dev_err(dev, "Post write wqe for mr free failed(%d)!", ret);
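/*
 * Free-MR work item: post one loopback WQE per reserved QP, poll the
 * shared CQ until every expected completion arrives or the timeout
 * expires, then signal the waiter via the completion.
 */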
1000 static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
1003 msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
1004 struct hns_roce_mr_free_work *mr_work =
1005 container_of(work, struct hns_roce_mr_free_work, work);
1006 struct hns_roce_dev *hr_dev = to_hr_dev(mr_work->ib_dev);
1007 struct hns_roce_v1_priv *priv = hr_dev->priv;
1008 struct hns_roce_free_mr *free_mr = &priv->free_mr;
1009 struct hns_roce_cq *mr_free_cq = free_mr->mr_free_cq;
1010 struct hns_roce_mr *hr_mr = mr_work->mr;
1011 struct device *dev = &hr_dev->pdev->dev;
1012 struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
1013 struct hns_roce_qp *hr_qp;
1018 for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
1019 hr_qp = free_mr->mr_free_qp[i];
1024 ret = hns_roce_v1_send_lp_wqe(hr_qp);
1027 "Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
1034 dev_err(dev, "Reserved loop qp is absent!\n");
1039 ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
1040 if (ret < 0 && hr_qp) {
1042 "(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
1043 hr_qp->qpn, ret, hr_mr->key, ne);
1047 usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
1048 (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
1049 } while (ne && time_before_eq(jiffies, end));
1053 "Poll cqe for mr 0x%x free timeout! Remain %d cqe\n",
1057 if (mr_work->comp_flag)
1058 complete(mr_work->comp);
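/*
 * Destroy the MPT entry, then queue the free-MR work and wait (up to
 * HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) for it to complete before releasing
 * the MR key and its PBL MTR.
 */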
1062 static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
1063 struct hns_roce_mr *mr, struct ib_udata *udata)
1065 struct hns_roce_v1_priv *priv = hr_dev->priv;
1066 struct hns_roce_free_mr *free_mr = &priv->free_mr;
1067 long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
1068 struct device *dev = &hr_dev->pdev->dev;
1069 struct hns_roce_mr_free_work *mr_work;
1070 unsigned long start = jiffies;
1071 struct completion comp;
1075 if (hns_roce_hw_destroy_mpt(hr_dev, NULL,
1076 key_to_hw_index(mr->key) &
1077 (hr_dev->caps.num_mtpts - 1)))
1078 dev_warn(dev, "DESTROY_MPT failed!\n");
1081 mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
1087 INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);
1089 mr_work->ib_dev = &(hr_dev->ib_dev);
1090 mr_work->comp = ∁
1091 mr_work->comp_flag = 1;
1092 mr_work->mr = (void *)mr;
1093 init_completion(mr_work->comp);
1095 queue_work(free_mr->free_mr_wq, &(mr_work->work));
1098 if (try_wait_for_completion(&comp))
1100 msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
1101 end -= HNS_ROCE_V1_FREE_MR_WAIT_VALUE;
1104 mr_work->comp_flag = 0;
1105 if (try_wait_for_completion(&comp))
1108 dev_warn(dev, "Free mr work 0x%x over 50s and failed!\n", mr->key);
1112 dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
1113 mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));
1115 hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
1116 key_to_hw_index(mr->key), 0);
1117 hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
1123 static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
1125 struct hns_roce_v1_priv *priv = hr_dev->priv;
1126 struct hns_roce_db_table *db = &priv->db_table;
1127 struct device *dev = &hr_dev->pdev->dev;
1129 if (db->sdb_ext_mod) {
1130 dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
1131 db->ext_db->sdb_buf_list->buf,
1132 db->ext_db->sdb_buf_list->map);
1133 kfree(db->ext_db->sdb_buf_list);
1136 if (db->odb_ext_mod) {
1137 dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE,
1138 db->ext_db->odb_buf_list->buf,
1139 db->ext_db->odb_buf_list->map);
1140 kfree(db->ext_db->odb_buf_list);
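/*
 * Set up the RAQ: allocate a coherent buffer, program its base address
 * (low part as addr >> 12, high part as addr >> 44) and shift, configure
 * the watermark, and enable extended RAQ mode and RAQ drop.
 */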
1146 static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
1148 struct hns_roce_v1_priv *priv = hr_dev->priv;
1149 struct hns_roce_raq_table *raq = &priv->raq_table;
1150 struct device *dev = &hr_dev->pdev->dev;
1157 raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
1158 if (!raq->e_raq_buf)
1161 raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE,
1163 if (!raq->e_raq_buf->buf) {
1165 goto err_dma_alloc_raq;
1167 raq->e_raq_buf->map = addr;
1169 /* Configure raq extended address, 48-bit, 4K aligned */
1170 roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);
1172 /* Configure raq_shift */
1173 raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
1174 val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
1175 tmp = cpu_to_le32(val);
1176 roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
1177 ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);
1179 * 44 = 32 + 12. The address written to hardware is shifted right by 12
1180 * because 4K pages are used, and by another 32 to extract the high
1181 * 32 bits programmed into this register.
1183 roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
1184 ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
1185 raq->e_raq_buf->map >> 44);
1186 val = le32_to_cpu(tmp);
1187 roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
1188 dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);
1190 /* Configure raq threshold */
1191 val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
1192 tmp = cpu_to_le32(val);
1193 roce_set_field(tmp, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
1194 ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
1195 HNS_ROCE_V1_EXT_RAQ_WF);
1196 val = le32_to_cpu(tmp);
1197 roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
1198 dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);
1200 /* Enable extend raq */
1201 val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
1202 tmp = cpu_to_le32(val);
1204 ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
1205 ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
1206 POL_TIME_INTERVAL_VAL);
1207 roce_set_bit(tmp, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
1209 ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
1210 ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
1213 ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
1214 val = le32_to_cpu(tmp);
1215 roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
1216 dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);
1218 /* Enable raq drop */
1219 val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
1220 tmp = cpu_to_le32(val);
1221 roce_set_bit(tmp, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
1222 val = le32_to_cpu(tmp);
1223 roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
1224 dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);
1229 kfree(raq->e_raq_buf);
1233 static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
1235 struct hns_roce_v1_priv *priv = hr_dev->priv;
1236 struct hns_roce_raq_table *raq = &priv->raq_table;
1237 struct device *dev = &hr_dev->pdev->dev;
1239 dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
1240 raq->e_raq_buf->map);
1241 kfree(raq->e_raq_buf);
1244 static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
1250 val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
1251 /* Open all ports */
1252 tmp = cpu_to_le32(val);
1253 roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
1254 ROCEE_GLB_CFG_ROCEE_PORT_ST_S,
1256 val = le32_to_cpu(tmp);
1257 roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
1259 val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
1260 /* Close all ports */
1261 tmp = cpu_to_le32(val);
1262 roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
1263 ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0);
1264 val = le32_to_cpu(tmp);
1265 roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
1269 static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
1271 struct hns_roce_v1_priv *priv = hr_dev->priv;
1272 struct device *dev = &hr_dev->pdev->dev;
1275 priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
1276 HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
1278 if (!priv->bt_table.qpc_buf.buf)
1281 priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
1282 HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
1284 if (!priv->bt_table.mtpt_buf.buf) {
1286 goto err_failed_alloc_mtpt_buf;
1289 priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
1290 HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
1292 if (!priv->bt_table.cqc_buf.buf) {
1294 goto err_failed_alloc_cqc_buf;
1299 err_failed_alloc_cqc_buf:
1300 dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
1301 priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
1303 err_failed_alloc_mtpt_buf:
1304 dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
1305 priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
1310 static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
1312 struct hns_roce_v1_priv *priv = hr_dev->priv;
1313 struct device *dev = &hr_dev->pdev->dev;
1315 dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
1316 priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);
1318 dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
1319 priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
1321 dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
1322 priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
1325 static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
1327 struct hns_roce_v1_priv *priv = hr_dev->priv;
1328 struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
1329 struct device *dev = &hr_dev->pdev->dev;
1332 * This buffer will be used for the CQ's tptr (tail pointer), also
1333 * named ci (consumer index). Every CQ uses 2 bytes to save its
1334 * CQE ci on hip06. Hardware reads this area to get the new ci
1335 * when the queue is almost full.
1337 tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
1338 &tptr_buf->map, GFP_KERNEL);
1342 hr_dev->tptr_dma_addr = tptr_buf->map;
1343 hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;
1348 static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
1350 struct hns_roce_v1_priv *priv = hr_dev->priv;
1351 struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
1352 struct device *dev = &hr_dev->pdev->dev;
1354 dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
1355 tptr_buf->buf, tptr_buf->map);
1358 static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
1360 struct hns_roce_v1_priv *priv = hr_dev->priv;
1361 struct hns_roce_free_mr *free_mr = &priv->free_mr;
1362 struct device *dev = &hr_dev->pdev->dev;
1365 free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
1366 if (!free_mr->free_mr_wq) {
1367 dev_err(dev, "Create free mr workqueue failed!\n");
1371 ret = hns_roce_v1_rsv_lp_qp(hr_dev);
1373 dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
1374 flush_workqueue(free_mr->free_mr_wq);
1375 destroy_workqueue(free_mr->free_mr_wq);
1381 static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
1383 struct hns_roce_v1_priv *priv = hr_dev->priv;
1384 struct hns_roce_free_mr *free_mr = &priv->free_mr;
1386 flush_workqueue(free_mr->free_mr_wq);
1387 destroy_workqueue(free_mr->free_mr_wq);
1389 hns_roce_v1_release_lp_qp(hr_dev);
1393 * hns_roce_v1_reset - reset RoCE
1394 * @hr_dev: RoCE device struct pointer
1395 * @dereset: true -- deassert (drop) reset, false -- assert reset
1396 * Return: 0 on success, negative errno on failure
1398 static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
1400 struct device_node *dsaf_node;
1401 struct device *dev = &hr_dev->pdev->dev;
1402 struct device_node *np = dev->of_node;
1403 struct fwnode_handle *fwnode;
1406 /* check if this is DT/ACPI case */
1407 if (dev_of_node(dev)) {
1408 dsaf_node = of_parse_phandle(np, "dsaf-handle", 0);
1410 dev_err(dev, "could not find dsaf-handle\n");
1413 fwnode = &dsaf_node->fwnode;
1414 } else if (is_acpi_device_node(dev->fwnode)) {
1415 struct fwnode_reference_args args;
1417 ret = acpi_node_get_property_reference(dev->fwnode,
1418 "dsaf-handle", 0, &args);
1420 dev_err(dev, "could not find dsaf-handle\n");
1423 fwnode = args.fwnode;
1425 dev_err(dev, "cannot read data from DT or ACPI\n");
1429 ret = hns_dsaf_roce_reset(fwnode, false);
1434 msleep(SLEEP_TIME_INTERVAL);
1435 ret = hns_dsaf_roce_reset(fwnode, true);
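/* Fill hr_dev->caps with the fixed resource limits of the hip06 (v1) engine */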
1441 static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
1444 struct hns_roce_caps *caps = &hr_dev->caps;
1446 hr_dev->vendor_id = roce_read(hr_dev, ROCEE_VENDOR_ID_REG);
1447 hr_dev->vendor_part_id = roce_read(hr_dev, ROCEE_VENDOR_PART_ID_REG);
1448 hr_dev->sys_image_guid = roce_read(hr_dev, ROCEE_SYS_IMAGE_GUID_L_REG) |
1449 ((u64)roce_read(hr_dev,
1450 ROCEE_SYS_IMAGE_GUID_H_REG) << 32);
1451 hr_dev->hw_rev = HNS_ROCE_HW_VER1;
1453 caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
1454 caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
1455 caps->min_wqes = HNS_ROCE_MIN_WQE_NUM;
1456 caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM;
1457 caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
1458 caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM;
1459 caps->max_sq_sg = HNS_ROCE_V1_SG_NUM;
1460 caps->max_rq_sg = HNS_ROCE_V1_SG_NUM;
1461 caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE;
1462 caps->num_uars = HNS_ROCE_V1_UAR_NUM;
1463 caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM;
1464 caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM;
1465 caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM;
1466 caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM;
1467 caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM;
1468 caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS;
1469 caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM;
1470 caps->max_qp_init_rdma = HNS_ROCE_V1_MAX_QP_INIT_RDMA;
1471 caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA;
1472 caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ;
1473 caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ;
1474 caps->qpc_entry_sz = HNS_ROCE_V1_QPC_ENTRY_SIZE;
1475 caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE;
1476 caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE;
1477 caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE;
1478 caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
1479 caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE;
1480 caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
1481 caps->reserved_lkey = 0;
1482 caps->reserved_pds = 0;
1483 caps->reserved_mrws = 1;
1484 caps->reserved_uars = 0;
1485 caps->reserved_cqs = 0;
1486 caps->reserved_qps = 12; /* 2 SQP per port, six ports total 12 */
1487 caps->chunk_sz = HNS_ROCE_V1_TABLE_CHUNK_SIZE;
1489 for (i = 0; i < caps->num_ports; i++)
1490 caps->pkey_table_len[i] = 1;
1492 for (i = 0; i < caps->num_ports; i++) {
1493 /* Six ports share 16 GIDs in the v1 engine */
1494 if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports))
1495 caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
1498 caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
1499 caps->num_ports + 1;
1502 caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
1503 caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
1504 caps->local_ca_ack_delay = roce_read(hr_dev, ROCEE_ACK_DELAY_REG);
1505 caps->max_mtu = IB_MTU_2048;
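/*
 * Engine bring-up order: DMAE user config, doorbells, RAQ, BT buffers,
 * tptr buffer, free-MR resources, then port enable; the error paths and
 * hns_roce_v1_exit() tear these down in reverse order.
 */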
1510 static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
1515 struct device *dev = &hr_dev->pdev->dev;
1517 /* DMAE user config */
1518 val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
1519 tmp = cpu_to_le32(val);
1520 roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
1521 ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
1522 roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
1523 ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
1524 1 << PAGES_SHIFT_16);
1525 val = le32_to_cpu(tmp);
1526 roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);
1528 val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
1529 tmp = cpu_to_le32(val);
1530 roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
1531 ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
1532 roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
1533 ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
1534 1 << PAGES_SHIFT_16);
1536 ret = hns_roce_db_init(hr_dev);
1538 dev_err(dev, "doorbell init failed!\n");
1542 ret = hns_roce_raq_init(hr_dev);
1544 dev_err(dev, "raq init failed!\n");
1545 goto error_failed_raq_init;
1548 ret = hns_roce_bt_init(hr_dev);
1550 dev_err(dev, "bt init failed!\n");
1551 goto error_failed_bt_init;
1554 ret = hns_roce_tptr_init(hr_dev);
1556 dev_err(dev, "tptr init failed!\n");
1557 goto error_failed_tptr_init;
1560 ret = hns_roce_free_mr_init(hr_dev);
1562 dev_err(dev, "free mr init failed!\n");
1563 goto error_failed_free_mr_init;
1566 hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
1570 error_failed_free_mr_init:
1571 hns_roce_tptr_free(hr_dev);
1573 error_failed_tptr_init:
1574 hns_roce_bt_free(hr_dev);
1576 error_failed_bt_init:
1577 hns_roce_raq_free(hr_dev);
1579 error_failed_raq_init:
1580 hns_roce_db_free(hr_dev);
1584 static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
1586 hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
1587 hns_roce_free_mr_free(hr_dev);
1588 hns_roce_tptr_free(hr_dev);
1589 hns_roce_bt_free(hr_dev);
1590 hns_roce_raq_free(hr_dev);
1591 hns_roce_db_free(hr_dev);
1594 static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev)
1596 u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG);
1598 return (!!(status & (1 << HCR_GO_BIT)));
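/*
 * Post a mailbox command: wait for the GO bit to clear, write the in/out
 * parameters and modifier into the HCR registers, then issue cmd, token
 * and the HW_RUN bit in a single final 32-bit write.
 */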
1601 static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
1602 u64 out_param, u32 in_modifier, u8 op_modifier,
1603 u16 op, u16 token, int event)
1605 u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG);
1610 end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
1611 while (hns_roce_v1_cmd_pending(hr_dev)) {
1612 if (time_after(jiffies, end)) {
1613 dev_err(hr_dev->dev, "jiffies=%d end=%d\n",
1614 (int)jiffies, (int)end);
1620 tmp = cpu_to_le32(val);
1621 roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
1623 roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
1624 ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
1625 roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
1626 roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
1627 roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_TOKEN_M,
1628 ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
1630 val = le32_to_cpu(tmp);
1631 writeq(in_param, hcr + 0);
1632 writeq(out_param, hcr + 2);
1633 writel(in_modifier, hcr + 4);
1634 /* Memory barrier */
1637 writel(val, hcr + 5);
1642 static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
1643 unsigned long timeout)
1645 u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
1646 unsigned long end = 0;
1649 end = msecs_to_jiffies(timeout) + jiffies;
1650 while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end))
1653 if (hns_roce_v1_cmd_pending(hr_dev)) {
1654 dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
1658 status = le32_to_cpu((__force __le32)
1659 __raw_readl(hcr + HCR_STATUS_OFFSET));
1660 if ((status & STATUS_MASK) != 0x1) {
1661 dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status);
1668 static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
1669 int gid_index, const union ib_gid *gid,
1670 const struct ib_gid_attr *attr)
1672 unsigned long flags;
1676 gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
1678 spin_lock_irqsave(&hr_dev->iboe.lock, flags);
1680 p = (u32 *)&gid->raw[0];
1681 roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
1682 (HNS_ROCE_V1_GID_NUM * gid_idx));
1684 p = (u32 *)&gid->raw[4];
1685 roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG +
1686 (HNS_ROCE_V1_GID_NUM * gid_idx));
1688 p = (u32 *)&gid->raw[8];
1689 roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG +
1690 (HNS_ROCE_V1_GID_NUM * gid_idx));
1692 p = (u32 *)&gid->raw[0xc];
1693 roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
1694 (HNS_ROCE_V1_GID_NUM * gid_idx));
1696 spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
1701 static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
1712 * When the MAC is changed, loopback may fail
1713 * because smac no longer equals dmac, so the
1714 * reserved loopback QPs must be released and recreated.
1716 if (hr_dev->hw->dereg_mr) {
1719 ret = hns_roce_v1_recreate_lp_qp(hr_dev);
1720 if (ret && ret != -ETIMEDOUT)
1724 p = (u32 *)(&addr[0]);
1726 roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
1727 PHY_PORT_OFFSET * phy_port);
1729 val = roce_read(hr_dev,
1730 ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
1731 tmp = cpu_to_le32(val);
1732 p_h = (u16 *)(&addr[4]);
1734 roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
1735 ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
1736 val = le32_to_cpu(tmp);
1737 roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
1743 static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
1749 val = roce_read(hr_dev,
1750 ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
1751 tmp = cpu_to_le32(val);
1752 roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
1753 ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu);
1754 val = le32_to_cpu(tmp);
1755 roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
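/*
 * Build a v1 MPT entry in the mailbox buffer: key state, access flags,
 * IOVA/length/PD, and up to HNS_ROCE_MAX_INNER_MTPT_NUM PBL page
 * addresses packed into the PA0..PA6 fields, plus the PBL base address.
 */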
1759 static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1760 unsigned long mtpt_idx)
1762 struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
1763 u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 };
1764 struct ib_device *ibdev = &hr_dev->ib_dev;
1765 struct hns_roce_v1_mpt_entry *mpt_entry;
1770 /* MPT filled into mailbox buf */
1771 mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf;
1772 memset(mpt_entry, 0, sizeof(*mpt_entry));
1774 roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M,
1775 MPT_BYTE_4_KEY_STATE_S, KEY_VALID);
1776 roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M,
1777 MPT_BYTE_4_KEY_S, mr->key);
1778 roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M,
1779 MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K);
1780 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0);
1781 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S,
1782 (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
1783 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0);
1784 roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M,
1785 MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type);
1786 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0);
1787 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S,
1788 (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
1789 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S,
1790 (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
1791 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S,
1792 (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
1793 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S,
1795 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0);
1797 roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
1798 MPT_BYTE_12_PBL_ADDR_H_S, 0);
1799 roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
1800 MPT_BYTE_12_MW_BIND_COUNTER_S, 0);
1802 mpt_entry->virt_addr_l = cpu_to_le32((u32)mr->iova);
1803 mpt_entry->virt_addr_h = cpu_to_le32((u32)(mr->iova >> 32));
1804 mpt_entry->length = cpu_to_le32((u32)mr->size);
1806 roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
1807 MPT_BYTE_28_PD_S, mr->pd);
1808 roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M,
1809 MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx);
1810 roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
1811 MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);
1813 /* DMA memory register */
1814 if (mr->type == MR_TYPE_DMA)
1817 count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
1818 ARRAY_SIZE(pages), &pbl_ba);
1820 ibdev_err(ibdev, "failed to find PBL mtr, count = %d.", count);
1824 /* Register user mr */
1825 for (i = 0; i < count; i++) {
1828 mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
1829 roce_set_field(mpt_entry->mpt_byte_36,
1830 MPT_BYTE_36_PA0_H_M,
1831 MPT_BYTE_36_PA0_H_S,
1832 (u32)(pages[i] >> PAGES_SHIFT_32));
1835 roce_set_field(mpt_entry->mpt_byte_36,
1836 MPT_BYTE_36_PA1_L_M,
1837 MPT_BYTE_36_PA1_L_S, (u32)(pages[i]));
1838 roce_set_field(mpt_entry->mpt_byte_40,
1839 MPT_BYTE_40_PA1_H_M,
1840 MPT_BYTE_40_PA1_H_S,
1841 (u32)(pages[i] >> PAGES_SHIFT_24));
1844 roce_set_field(mpt_entry->mpt_byte_40,
1845 MPT_BYTE_40_PA2_L_M,
1846 MPT_BYTE_40_PA2_L_S, (u32)(pages[i]));
1847 roce_set_field(mpt_entry->mpt_byte_44,
1848 MPT_BYTE_44_PA2_H_M,
1849 MPT_BYTE_44_PA2_H_S,
1850 (u32)(pages[i] >> PAGES_SHIFT_16));
1853 roce_set_field(mpt_entry->mpt_byte_44,
1854 MPT_BYTE_44_PA3_L_M,
1855 MPT_BYTE_44_PA3_L_S, (u32)(pages[i]));
1856 roce_set_field(mpt_entry->mpt_byte_48,
1857 MPT_BYTE_48_PA3_H_M,
1858 MPT_BYTE_48_PA3_H_S,
1859 (u32)(pages[i] >> PAGES_SHIFT_8));
1862 mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
1863 roce_set_field(mpt_entry->mpt_byte_56,
1864 MPT_BYTE_56_PA4_H_M,
1865 MPT_BYTE_56_PA4_H_S,
1866 (u32)(pages[i] >> PAGES_SHIFT_32));
1869 roce_set_field(mpt_entry->mpt_byte_56,
1870 MPT_BYTE_56_PA5_L_M,
1871 MPT_BYTE_56_PA5_L_S, (u32)(pages[i]));
1872 roce_set_field(mpt_entry->mpt_byte_60,
1873 MPT_BYTE_60_PA5_H_M,
1874 MPT_BYTE_60_PA5_H_S,
1875 (u32)(pages[i] >> PAGES_SHIFT_24));
1878 roce_set_field(mpt_entry->mpt_byte_60,
1879 MPT_BYTE_60_PA6_L_M,
1880 MPT_BYTE_60_PA6_L_S, (u32)(pages[i]));
1881 roce_set_field(mpt_entry->mpt_byte_64,
1882 MPT_BYTE_64_PA6_H_M,
1883 MPT_BYTE_64_PA6_H_S,
1884 (u32)(pages[i] >> PAGES_SHIFT_16));
1891 mpt_entry->pbl_addr_l = cpu_to_le32(pbl_ba);
1892 roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
1893 MPT_BYTE_12_PBL_ADDR_H_S, upper_32_bits(pbl_ba));
1898 static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
1900 return hns_roce_buf_offset(hr_cq->mtr.kmem,
1901 n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
1904 static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
1906 struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);
1908 /* Return the CQE only when its owner bit is the inverse of the MSB of cons_idx */
1909 return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
1910 !!(n & hr_cq->cq_depth)) ? hr_cqe : NULL;
1913 static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
1915 return get_sw_cqe(hr_cq, hr_cq->cons_index);
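/* Update the CQ consumer index through the "others" doorbell (command 3) */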
1918 static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
1922 doorbell[0] = cpu_to_le32(cons_index & ((hr_cq->cq_depth << 1) - 1));
1924 roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
1925 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
1926 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
1927 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
1928 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0);
1929 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
1930 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);
1932 hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
1935 static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
1936 struct hns_roce_srq *srq)
1938 struct hns_roce_cqe *cqe, *dest;
1943 for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index);
1945 if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
1950 * Now sweep backwards through the CQ, removing CQ entries
1951 * that match our QP by overwriting them with the following entries.
1953 while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
1954 cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
1955 if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
1956 CQE_BYTE_16_LOCAL_QPN_S) &
1957 HNS_ROCE_CQE_QPN_MASK) == qpn) {
1958 /* The v1 engine does not support SRQ */
1960 } else if (nfreed) {
1961 dest = get_cqe(hr_cq, (prod_index + nfreed) &
1963 owner_bit = roce_get_bit(dest->cqe_byte_4,
1964 CQE_BYTE_4_OWNER_S);
1965 memcpy(dest, cqe, sizeof(*cqe));
1966 roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S,
1972 hr_cq->cons_index += nfreed;
1974 * Make sure update of buffer contents is done before
1975 * updating consumer index.
1979 hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
1983 static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
1984 struct hns_roce_srq *srq)
1986 spin_lock_irq(&hr_cq->lock);
1987 __hns_roce_v1_cq_clean(hr_cq, qpn, srq);
1988 spin_unlock_irq(&hr_cq->lock);
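/*
 * Fill the CQ context for the mailbox: CQ state and number, base-table
 * and current-CQE addresses, CQE shift, bound CEQ, and this CQ's slot in
 * the shared tptr buffer.
 */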
1991 static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
1992 struct hns_roce_cq *hr_cq, void *mb_buf,
1993 u64 *mtts, dma_addr_t dma_handle)
1995 struct hns_roce_v1_priv *priv = hr_dev->priv;
1996 struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
1997 struct hns_roce_cq_context *cq_context = mb_buf;
1998 dma_addr_t tptr_dma_addr;
2001 memset(cq_context, 0, sizeof(*cq_context));
2003 /* Get the tptr for this CQ. */
2004 offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
2005 tptr_dma_addr = tptr_buf->map + offset;
2006 hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);
2008 /* Register cq_context members */
2009 roce_set_field(cq_context->cqc_byte_4,
2010 CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M,
2011 CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
2012 roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
2013 CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);
2015 cq_context->cq_bt_l = cpu_to_le32((u32)dma_handle);
2017 roce_set_field(cq_context->cqc_byte_12,
2018 CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
2019 CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S,
2020 ((u64)dma_handle >> 32));
2021 roce_set_field(cq_context->cqc_byte_12,
2022 CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
2023 CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
2024 ilog2(hr_cq->cq_depth));
2025 roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
2026 CQ_CONTEXT_CQC_BYTE_12_CEQN_S, hr_cq->vector);
2028 cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));
2030 roce_set_field(cq_context->cqc_byte_20,
2031 CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
2032 CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S, (mtts[0]) >> 32);
2033 /* Dedicated hardware, directly set 0 */
2034 roce_set_field(cq_context->cqc_byte_20,
2035 CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
2036 CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0);
2038 * 44 = 32 + 12. The address written to hardware is shifted right by 12
2039 * because 4K pages are used, and by another 32 to extract the high
2040 * 32 bits programmed into this register.
2042 roce_set_field(cq_context->cqc_byte_20,
2043 CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
2044 CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
2045 tptr_dma_addr >> 44);
2047 cq_context->cqe_tptr_addr_l = cpu_to_le32((u32)(tptr_dma_addr >> 12));
2049 roce_set_field(cq_context->cqc_byte_32,
2050 CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
2051 CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0);
2052 roce_set_bit(cq_context->cqc_byte_32,
2053 CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0);
2054 roce_set_bit(cq_context->cqc_byte_32,
2055 CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0);
2056 roce_set_bit(cq_context->cqc_byte_32,
2057 CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0);
2058 roce_set_bit(cq_context->cqc_byte_32,
2059 CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
2061 /* The initial value of cq's ci is 0 */
2062 roce_set_field(cq_context->cqc_byte_32,
2063 CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
2064 CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
2067 static int hns_roce_v1_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
2072 static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
2073 enum ib_cq_notify_flags flags)
2075 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2076 u32 notification_flag;
2077 __le32 doorbell[2] = {};
2079 notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
2080 IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
2082 * flags = 0: notification flag = 1, next completion
2083 * flags = 1: notification flag = 0, solicited completion only
2086 cpu_to_le32(hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
2087 roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
2088 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
2089 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
2090 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
2091 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1);
2092 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
2093 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
2094 hr_cq->cqn | notification_flag);
2096 hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
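/*
 * Consume one CQE: advance the consumer index, look up the owning QP
 * (recomputing the QPN for UD CQEs), translate the hardware status and
 * opcode into ib_wc fields, and fill in the rest of the work completion.
 */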
2101 static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
2102 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
2109 struct hns_roce_cqe *cqe;
2110 struct hns_roce_qp *hr_qp;
2111 struct hns_roce_wq *wq;
2112 struct hns_roce_wqe_ctrl_seg *sq_wqe;
2113 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
2114 struct device *dev = &hr_dev->pdev->dev;
2116 /* Find the cqe according to the consumer index */
2117 cqe = next_cqe_sw(hr_cq);
2121 ++hr_cq->cons_index;
2122 /* Memory barrier */
2125 is_send = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));
2127 /* Local_qpn in a UD cqe is always 1, so the real qpn must be computed */
2128 if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2129 CQE_BYTE_16_LOCAL_QPN_S) <= 1) {
2130 qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
2131 CQE_BYTE_20_PORT_NUM_S) +
2132 roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2133 CQE_BYTE_16_LOCAL_QPN_S) *
2136 qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2137 CQE_BYTE_16_LOCAL_QPN_S);
2140 if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) {
2141 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
2142 if (unlikely(!hr_qp)) {
2143 dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n",
2144 hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK));
2151 wc->qp = &(*cur_qp)->ibqp;
2154 status = roce_get_field(cqe->cqe_byte_4,
2155 CQE_BYTE_4_STATUS_OF_THE_OPERATION_M,
2156 CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) &
2157 HNS_ROCE_CQE_STATUS_MASK;
2159 case HNS_ROCE_CQE_SUCCESS:
2160 wc->status = IB_WC_SUCCESS;
2162 case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR:
2163 wc->status = IB_WC_LOC_LEN_ERR;
2165 case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR:
2166 wc->status = IB_WC_LOC_QP_OP_ERR;
2168 case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR:
2169 wc->status = IB_WC_LOC_PROT_ERR;
2171 case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR:
2172 wc->status = IB_WC_WR_FLUSH_ERR;
2174 case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR:
2175 wc->status = IB_WC_MW_BIND_ERR;
2177 case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR:
2178 wc->status = IB_WC_BAD_RESP_ERR;
2180 case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR:
2181 wc->status = IB_WC_LOC_ACCESS_ERR;
2183 case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
2184 wc->status = IB_WC_REM_INV_REQ_ERR;
2186 case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR:
2187 wc->status = IB_WC_REM_ACCESS_ERR;
2189 case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR:
2190 wc->status = IB_WC_REM_OP_ERR;
2192 case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
2193 wc->status = IB_WC_RETRY_EXC_ERR;
2195 case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
2196 wc->status = IB_WC_RNR_RETRY_EXC_ERR;
2199 wc->status = IB_WC_GENERAL_ERR;
2203 /* On a CQE status error, return directly */
2204 if (wc->status != IB_WC_SUCCESS)
2208 /* SQ corresponds to CQE */
2209 sq_wqe = hns_roce_get_send_wqe(*cur_qp,
2210 roce_get_field(cqe->cqe_byte_4,
2211 CQE_BYTE_4_WQE_INDEX_M,
2212 CQE_BYTE_4_WQE_INDEX_S) &
2213 ((*cur_qp)->sq.wqe_cnt-1));
2214 switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) {
2215 case HNS_ROCE_WQE_OPCODE_SEND:
2216 wc->opcode = IB_WC_SEND;
2218 case HNS_ROCE_WQE_OPCODE_RDMA_READ:
2219 wc->opcode = IB_WC_RDMA_READ;
2220 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2222 case HNS_ROCE_WQE_OPCODE_RDMA_WRITE:
2223 wc->opcode = IB_WC_RDMA_WRITE;
2225 case HNS_ROCE_WQE_OPCODE_LOCAL_INV:
2226 wc->opcode = IB_WC_LOCAL_INV;
2228 case HNS_ROCE_WQE_OPCODE_UD_SEND:
2229 wc->opcode = IB_WC_SEND;
2232 wc->status = IB_WC_GENERAL_ERR;
2235 wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ?
2236 IB_WC_WITH_IMM : 0);
2238 wq = &(*cur_qp)->sq;
2239 if ((*cur_qp)->sq_signal_bits) {
2241 * If sq_signal_bits is 1, the tail pointer is first
2242 * updated to the wqe that the current cqe
2243 * corresponds to.
2245 wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
2246 CQE_BYTE_4_WQE_INDEX_M,
2247 CQE_BYTE_4_WQE_INDEX_S);
2248 wq->tail += (wqe_ctr - (u16)wq->tail) &
2251 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2254 /* RQ corresponds to CQE */
2255 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2256 opcode = roce_get_field(cqe->cqe_byte_4,
2257 CQE_BYTE_4_OPERATION_TYPE_M,
2258 CQE_BYTE_4_OPERATION_TYPE_S) &
2259 HNS_ROCE_CQE_OPCODE_MASK;
2261 case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
2262 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2263 wc->wc_flags = IB_WC_WITH_IMM;
2265 cpu_to_be32(le32_to_cpu(cqe->immediate_data));
2267 case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
2268 if (roce_get_bit(cqe->cqe_byte_4,
2269 CQE_BYTE_4_IMM_INDICATOR_S)) {
2270 wc->opcode = IB_WC_RECV;
2271 wc->wc_flags = IB_WC_WITH_IMM;
2272 wc->ex.imm_data = cpu_to_be32(
2273 le32_to_cpu(cqe->immediate_data));
2275 wc->opcode = IB_WC_RECV;
2280 wc->status = IB_WC_GENERAL_ERR;
2284 /* Update tail pointer, record wr_id */
2285 wq = &(*cur_qp)->rq;
2286 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2288 wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
2290 wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
2291 CQE_BYTE_20_REMOTE_QPN_M,
2292 CQE_BYTE_20_REMOTE_QPN_S);
2293 wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
2294 CQE_BYTE_20_GRH_PRESENT_S) ?
2296 wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28,
2297 CQE_BYTE_28_P_KEY_IDX_M,
2298 CQE_BYTE_28_P_KEY_IDX_S);
2304 int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2306 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2307 struct hns_roce_qp *cur_qp = NULL;
2308 unsigned long flags;
2312 spin_lock_irqsave(&hr_cq->lock, flags);
2314 for (npolled = 0; npolled < num_entries; ++npolled) {
2315 ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled);
2321 *hr_cq->tptr_addr = hr_cq->cons_index &
2322 ((hr_cq->cq_depth << 1) - 1);
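/*
 * The tptr buffer updated above was handed to hardware in the CQ context
 * (cqe_tptr_addr), so the device can see how far software has consumed
 * the CQ.
 */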
2324 /* Memory barrier */
2326 hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
2329 spin_unlock_irqrestore(&hr_cq->lock, flags);
2331 if (ret == 0 || ret == -EAGAIN)
2337 static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
2338 struct hns_roce_hem_table *table, int obj,
2341 struct hns_roce_v1_priv *priv = hr_dev->priv;
2342 struct device *dev = &hr_dev->pdev->dev;
2343 long end = HW_SYNC_TIMEOUT_MSECS;
2344 __le32 bt_cmd_val[2] = {0};
2345 unsigned long flags = 0;
2346 void __iomem *bt_cmd;
2349 switch (table->type) {
2351 bt_ba = priv->bt_table.qpc_buf.map >> 12;
2354 bt_ba = priv->bt_table.mtpt_buf.map >> 12;
2357 bt_ba = priv->bt_table.cqc_buf.map >> 12;
2360 dev_dbg(dev, "HEM_TYPE_SRQC not supported.\n");
2365 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
2366 ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
2367 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
2368 ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
2369 roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
2370 roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
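/*
 * The BT command is issued through the ROCEE_BT_CMD_L/H register pair:
 * wait for the previous hardware sync to finish, then write the base
 * address and command words as one 64-bit doorbell.
 */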
2372 spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
2374 bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
2377 if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
2379 dev_err(dev, "Write bt_cmd err, hw_sync is not zero.\n");
2380 spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
2387 mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
2388 end -= HW_SYNC_SLEEP_TIME_INTERVAL;
2391 bt_cmd_val[0] = cpu_to_le32(bt_ba);
2392 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
2393 ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
2394 hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
2396 spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
2401 static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
2402 enum hns_roce_qp_state cur_state,
2403 enum hns_roce_qp_state new_state,
2404 struct hns_roce_qp_context *context,
2405 struct hns_roce_qp *hr_qp)
2408 op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = {
2409 [HNS_ROCE_QP_STATE_RST] = {
2410 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2411 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2412 [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2414 [HNS_ROCE_QP_STATE_INIT] = {
2415 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2416 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2417 /* Note: In the v1 engine, HW doesn't support INIT2INIT.
2418 * We use the RST2INIT cmd instead of INIT2INIT.
2420 [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2421 [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP,
2423 [HNS_ROCE_QP_STATE_RTR] = {
2424 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2425 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2426 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP,
2428 [HNS_ROCE_QP_STATE_RTS] = {
2429 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2430 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2431 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP,
2432 [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP,
2434 [HNS_ROCE_QP_STATE_SQD] = {
2435 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2436 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2437 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP,
2438 [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP,
2440 [HNS_ROCE_QP_STATE_ERR] = {
2441 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2442 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2446 struct hns_roce_cmd_mailbox *mailbox;
2447 struct device *dev = &hr_dev->pdev->dev;
2450 if (cur_state >= HNS_ROCE_QP_NUM_STATE ||
2451 new_state >= HNS_ROCE_QP_NUM_STATE ||
2452 !op[cur_state][new_state]) {
2453 dev_err(dev, "[modify_qp] unsupported state transition from %d to %d\n",
2454 cur_state, new_state);
2458 if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
2459 return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2460 HNS_ROCE_CMD_2RST_QP,
2461 HNS_ROCE_CMD_TIMEOUT_MSECS);
2463 if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
2464 return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2465 HNS_ROCE_CMD_2ERR_QP,
2466 HNS_ROCE_CMD_TIMEOUT_MSECS);
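/*
 * 2RST and 2ERR carry no QP context; every other transition passes the
 * updated context to hardware through a mailbox.
 */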
2468 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2469 if (IS_ERR(mailbox))
2470 return PTR_ERR(mailbox);
2472 memcpy(mailbox->buf, context, sizeof(*context));
2474 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
2475 op[cur_state][new_state],
2476 HNS_ROCE_CMD_TIMEOUT_MSECS);
2478 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2482 static int find_wqe_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
2483 u64 *sq_ba, u64 *rq_ba, dma_addr_t *bt_ba)
2485 struct ib_device *ibdev = &hr_dev->ib_dev;
2489 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, sq_ba, 1, bt_ba);
2491 ibdev_err(ibdev, "Failed to find SQ ba\n");
2494 rq_pa_start = hr_qp->rq.offset >> hr_qp->mtr.hem_cfg.buf_pg_shift;
2495 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, rq_pa_start, rq_ba, 1,
2498 ibdev_err(ibdev, "Failed to find RQ ba\n");
2505 static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
2506 int attr_mask, enum ib_qp_state cur_state,
2507 enum ib_qp_state new_state)
2509 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2510 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2511 struct hns_roce_sqp_context *context;
2512 dma_addr_t dma_handle = 0;
2519 context = kzalloc(sizeof(*context), GFP_KERNEL);
2523 /* Search QP buf's MTTs */
2524 if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle))
2527 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2528 roce_set_field(context->qp1c_bytes_4,
2529 QP1C_BYTES_4_SQ_WQE_SHIFT_M,
2530 QP1C_BYTES_4_SQ_WQE_SHIFT_S,
2531 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2532 roce_set_field(context->qp1c_bytes_4,
2533 QP1C_BYTES_4_RQ_WQE_SHIFT_M,
2534 QP1C_BYTES_4_RQ_WQE_SHIFT_S,
2535 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2536 roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
2537 QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);
2539 context->sq_rq_bt_l = cpu_to_le32(dma_handle);
2540 roce_set_field(context->qp1c_bytes_12,
2541 QP1C_BYTES_12_SQ_RQ_BT_H_M,
2542 QP1C_BYTES_12_SQ_RQ_BT_H_S,
2543 upper_32_bits(dma_handle));
2545 roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
2546 QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
2547 roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
2548 QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
2549 roce_set_bit(context->qp1c_bytes_16,
2550 QP1C_BYTES_16_SIGNALING_TYPE_S,
2551 hr_qp->sq_signal_bits);
2552 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
2554 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
2556 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S,
2559 roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M,
2560 QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head);
2561 roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
2562 QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);
2564 context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba);
2566 roce_set_field(context->qp1c_bytes_28,
2567 QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
2568 QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
2569 upper_32_bits(rq_ba));
2570 roce_set_field(context->qp1c_bytes_28,
2571 QP1C_BYTES_28_RQ_CUR_IDX_M,
2572 QP1C_BYTES_28_RQ_CUR_IDX_S, 0);
2574 roce_set_field(context->qp1c_bytes_32,
2575 QP1C_BYTES_32_RX_CQ_NUM_M,
2576 QP1C_BYTES_32_RX_CQ_NUM_S,
2577 to_hr_cq(ibqp->recv_cq)->cqn);
2578 roce_set_field(context->qp1c_bytes_32,
2579 QP1C_BYTES_32_TX_CQ_NUM_M,
2580 QP1C_BYTES_32_TX_CQ_NUM_S,
2581 to_hr_cq(ibqp->send_cq)->cqn);
2583 context->cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
2585 roce_set_field(context->qp1c_bytes_40,
2586 QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
2587 QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
2588 upper_32_bits(sq_ba));
2589 roce_set_field(context->qp1c_bytes_40,
2590 QP1C_BYTES_40_SQ_CUR_IDX_M,
2591 QP1C_BYTES_40_SQ_CUR_IDX_S, 0);
2593 /* Copy context to QP1C register */
2594 addr = (u32 __iomem *)(hr_dev->reg_base +
2595 ROCEE_QP1C_CFG0_0_REG +
2596 hr_qp->phy_port * sizeof(*context));
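/*
 * QP1 context lives in a per-port QP1C register block, so it is written
 * directly over MMIO instead of through the mailbox command path used
 * for regular QPs.
 */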
2598 writel(le32_to_cpu(context->qp1c_bytes_4), addr);
2599 writel(le32_to_cpu(context->sq_rq_bt_l), addr + 1);
2600 writel(le32_to_cpu(context->qp1c_bytes_12), addr + 2);
2601 writel(le32_to_cpu(context->qp1c_bytes_16), addr + 3);
2602 writel(le32_to_cpu(context->qp1c_bytes_20), addr + 4);
2603 writel(le32_to_cpu(context->cur_rq_wqe_ba_l), addr + 5);
2604 writel(le32_to_cpu(context->qp1c_bytes_28), addr + 6);
2605 writel(le32_to_cpu(context->qp1c_bytes_32), addr + 7);
2606 writel(le32_to_cpu(context->cur_sq_wqe_ba_l), addr + 8);
2607 writel(le32_to_cpu(context->qp1c_bytes_40), addr + 9);
2610 /* Modify QP1C status */
2611 reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
2612 hr_qp->phy_port * sizeof(*context));
2613 tmp = cpu_to_le32(reg_val);
2614 roce_set_field(tmp, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
2615 ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
2616 reg_val = le32_to_cpu(tmp);
2617 roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
2618 hr_qp->phy_port * sizeof(*context), reg_val);
2620 hr_qp->state = new_state;
2621 if (new_state == IB_QPS_RESET) {
2622 hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
2623 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
2624 if (ibqp->send_cq != ibqp->recv_cq)
2625 hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
2642 static bool check_qp_state(enum ib_qp_state cur_state,
2643 enum ib_qp_state new_state)
2645 static const bool sm[][IB_QPS_ERR + 1] = {
2646 [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
2647 [IB_QPS_INIT] = true },
2648 [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
2649 [IB_QPS_INIT] = true,
2650 [IB_QPS_RTR] = true,
2651 [IB_QPS_ERR] = true },
2652 [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
2653 [IB_QPS_RTS] = true,
2654 [IB_QPS_ERR] = true },
2655 [IB_QPS_RTS] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true },
2658 [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
2661 return sm[cur_state][new_state];
2664 static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
2665 int attr_mask, enum ib_qp_state cur_state,
2666 enum ib_qp_state new_state)
2668 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2669 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2670 struct device *dev = &hr_dev->pdev->dev;
2671 struct hns_roce_qp_context *context;
2672 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2673 dma_addr_t dma_handle_2 = 0;
2674 dma_addr_t dma_handle = 0;
2675 __le32 doorbell[2] = {0};
2685 if (!check_qp_state(cur_state, new_state)) {
2686 ibdev_err(ibqp->device,
2687 "not support QP(%u) status from %d to %d\n",
2688 ibqp->qp_num, cur_state, new_state);
2692 context = kzalloc(sizeof(*context), GFP_KERNEL);
2696 /* Search qp buf's mtts */
2697 if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle))
2700 /* Search IRRL's mtts */
2701 mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
2702 hr_qp->qpn, &dma_handle_2);
2703 if (mtts_2 == NULL) {
2704 dev_err(dev, "qp irrl_table find failed\n");
2711 * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
2712 * Optional param: NA
2714 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2715 roce_set_field(context->qpc_bytes_4,
2716 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2717 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2718 to_hr_qp_type(hr_qp->ibqp.qp_type));
2720 roce_set_bit(context->qpc_bytes_4,
2721 QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2722 roce_set_bit(context->qpc_bytes_4,
2723 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2724 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
2725 roce_set_bit(context->qpc_bytes_4,
2726 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2727 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
2729 roce_set_bit(context->qpc_bytes_4,
2730 QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S,
2731 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
2733 roce_set_bit(context->qpc_bytes_4,
2734 QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2735 roce_set_field(context->qpc_bytes_4,
2736 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2737 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2738 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2739 roce_set_field(context->qpc_bytes_4,
2740 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2741 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2742 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2743 roce_set_field(context->qpc_bytes_4,
2744 QP_CONTEXT_QPC_BYTES_4_PD_M,
2745 QP_CONTEXT_QPC_BYTES_4_PD_S,
2746 to_hr_pd(ibqp->pd)->pdn);
2747 hr_qp->access_flags = attr->qp_access_flags;
2748 roce_set_field(context->qpc_bytes_8,
2749 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2750 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2751 to_hr_cq(ibqp->send_cq)->cqn);
2752 roce_set_field(context->qpc_bytes_8,
2753 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2754 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2755 to_hr_cq(ibqp->recv_cq)->cqn);
2758 roce_set_field(context->qpc_bytes_12,
2759 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2760 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2761 to_hr_srq(ibqp->srq)->srqn);
2763 roce_set_field(context->qpc_bytes_12,
2764 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2765 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2767 hr_qp->pkey_index = attr->pkey_index;
2768 roce_set_field(context->qpc_bytes_16,
2769 QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2770 QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2772 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
2773 roce_set_field(context->qpc_bytes_4,
2774 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2775 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2776 to_hr_qp_type(hr_qp->ibqp.qp_type));
2777 roce_set_bit(context->qpc_bytes_4,
2778 QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2779 if (attr_mask & IB_QP_ACCESS_FLAGS) {
2780 roce_set_bit(context->qpc_bytes_4,
2781 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2782 !!(attr->qp_access_flags &
2783 IB_ACCESS_REMOTE_READ));
2784 roce_set_bit(context->qpc_bytes_4,
2785 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2786 !!(attr->qp_access_flags &
2787 IB_ACCESS_REMOTE_WRITE));
2789 roce_set_bit(context->qpc_bytes_4,
2790 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2791 !!(hr_qp->access_flags &
2792 IB_ACCESS_REMOTE_READ));
2793 roce_set_bit(context->qpc_bytes_4,
2794 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2795 !!(hr_qp->access_flags &
2796 IB_ACCESS_REMOTE_WRITE));
2799 roce_set_bit(context->qpc_bytes_4,
2800 QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2801 roce_set_field(context->qpc_bytes_4,
2802 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2803 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2804 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2805 roce_set_field(context->qpc_bytes_4,
2806 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2807 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2808 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2809 roce_set_field(context->qpc_bytes_4,
2810 QP_CONTEXT_QPC_BYTES_4_PD_M,
2811 QP_CONTEXT_QPC_BYTES_4_PD_S,
2812 to_hr_pd(ibqp->pd)->pdn);
2814 roce_set_field(context->qpc_bytes_8,
2815 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2816 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2817 to_hr_cq(ibqp->send_cq)->cqn);
2818 roce_set_field(context->qpc_bytes_8,
2819 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2820 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2821 to_hr_cq(ibqp->recv_cq)->cqn);
2824 roce_set_field(context->qpc_bytes_12,
2825 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2826 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2827 to_hr_srq(ibqp->srq)->srqn);
2828 if (attr_mask & IB_QP_PKEY_INDEX)
2829 roce_set_field(context->qpc_bytes_12,
2830 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2831 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2834 roce_set_field(context->qpc_bytes_12,
2835 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2836 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2839 roce_set_field(context->qpc_bytes_16,
2840 QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2841 QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2842 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
2843 if ((attr_mask & IB_QP_ALT_PATH) ||
2844 (attr_mask & IB_QP_ACCESS_FLAGS) ||
2845 (attr_mask & IB_QP_PKEY_INDEX) ||
2846 (attr_mask & IB_QP_QKEY)) {
2847 dev_err(dev, "INIT2RTR attr_mask error\n");
2851 dmac = (u8 *)attr->ah_attr.roce.dmac;
2853 context->sq_rq_bt_l = cpu_to_le32(dma_handle);
2854 roce_set_field(context->qpc_bytes_24,
2855 QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
2856 QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
2857 upper_32_bits(dma_handle));
2858 roce_set_bit(context->qpc_bytes_24,
2859 QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
2861 roce_set_field(context->qpc_bytes_24,
2862 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
2863 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S,
2864 attr->min_rnr_timer);
2865 context->irrl_ba_l = cpu_to_le32((u32)(dma_handle_2));
2866 roce_set_field(context->qpc_bytes_32,
2867 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M,
2868 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S,
2869 ((u32)(dma_handle_2 >> 32)) &
2870 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M);
2871 roce_set_field(context->qpc_bytes_32,
2872 QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M,
2873 QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0);
2874 roce_set_bit(context->qpc_bytes_32,
2875 QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S,
2877 roce_set_bit(context->qpc_bytes_32,
2878 QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
2879 hr_qp->sq_signal_bits);
2881 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
2883 smac = (u8 *)hr_dev->dev_addr[port];
2884 /* when dmac equals smac or loop_idc is 1, the packet should loop back */
2885 if (ether_addr_equal_unaligned(dmac, smac) ||
2886 hr_dev->loop_idc == 0x1)
2887 roce_set_bit(context->qpc_bytes_32,
2888 QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
2890 roce_set_bit(context->qpc_bytes_32,
2891 QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
2892 rdma_ah_get_ah_flags(&attr->ah_attr));
2893 roce_set_field(context->qpc_bytes_32,
2894 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
2895 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
2896 ilog2((unsigned int)attr->max_dest_rd_atomic));
2898 if (attr_mask & IB_QP_DEST_QPN)
2899 roce_set_field(context->qpc_bytes_36,
2900 QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
2901 QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
2904 /* Configure GID index */
2905 port_num = rdma_ah_get_port_num(&attr->ah_attr);
2906 roce_set_field(context->qpc_bytes_36,
2907 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
2908 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S,
2909 hns_get_gid_index(hr_dev,
2913 memcpy(&(context->dmac_l), dmac, 4);
2915 roce_set_field(context->qpc_bytes_44,
2916 QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
2917 QP_CONTEXT_QPC_BYTES_44_DMAC_H_S,
2918 *((u16 *)(&dmac[4])));
2919 roce_set_field(context->qpc_bytes_44,
2920 QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M,
2921 QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S,
2922 rdma_ah_get_static_rate(&attr->ah_attr));
2923 roce_set_field(context->qpc_bytes_44,
2924 QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
2925 QP_CONTEXT_QPC_BYTES_44_HOPLMT_S,
2928 roce_set_field(context->qpc_bytes_48,
2929 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
2930 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S,
2932 roce_set_field(context->qpc_bytes_48,
2933 QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
2934 QP_CONTEXT_QPC_BYTES_48_TCLASS_S,
2935 grh->traffic_class);
2936 roce_set_field(context->qpc_bytes_48,
2937 QP_CONTEXT_QPC_BYTES_48_MTU_M,
2938 QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu);
2940 memcpy(context->dgid, grh->dgid.raw,
2941 sizeof(grh->dgid.raw));
2943 dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l,
2944 roce_get_field(context->qpc_bytes_44,
2945 QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
2946 QP_CONTEXT_QPC_BYTES_44_DMAC_H_S));
2948 roce_set_field(context->qpc_bytes_68,
2949 QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
2950 QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
2952 roce_set_field(context->qpc_bytes_68,
2953 QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
2954 QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
2956 context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba);
2958 roce_set_field(context->qpc_bytes_76,
2959 QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
2960 QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
2961 upper_32_bits(rq_ba));
2962 roce_set_field(context->qpc_bytes_76,
2963 QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
2964 QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);
2966 context->rx_rnr_time = 0;
2968 roce_set_field(context->qpc_bytes_84,
2969 QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M,
2970 QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S,
2972 roce_set_field(context->qpc_bytes_84,
2973 QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M,
2974 QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0);
2976 roce_set_field(context->qpc_bytes_88,
2977 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
2978 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S,
2980 roce_set_bit(context->qpc_bytes_88,
2981 QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0);
2982 roce_set_bit(context->qpc_bytes_88,
2983 QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0);
2984 roce_set_field(context->qpc_bytes_88,
2985 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M,
2986 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S,
2988 roce_set_field(context->qpc_bytes_88,
2989 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M,
2990 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S,
2993 context->dma_length = 0;
2998 roce_set_field(context->qpc_bytes_108,
2999 QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M,
3000 QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0);
3001 roce_set_bit(context->qpc_bytes_108,
3002 QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0);
3003 roce_set_bit(context->qpc_bytes_108,
3004 QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0);
3006 roce_set_field(context->qpc_bytes_112,
3007 QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M,
3008 QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0);
3009 roce_set_field(context->qpc_bytes_112,
3010 QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M,
3011 QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0);
3013 /* For chip resp ack */
3014 roce_set_field(context->qpc_bytes_156,
3015 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
3016 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
3018 roce_set_field(context->qpc_bytes_156,
3019 QP_CONTEXT_QPC_BYTES_156_SL_M,
3020 QP_CONTEXT_QPC_BYTES_156_SL_S,
3021 rdma_ah_get_sl(&attr->ah_attr));
3022 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3023 } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
3024 /* If any optional param exists, return an error */
3025 if ((attr_mask & IB_QP_ALT_PATH) ||
3026 (attr_mask & IB_QP_ACCESS_FLAGS) ||
3027 (attr_mask & IB_QP_QKEY) ||
3028 (attr_mask & IB_QP_PATH_MIG_STATE) ||
3029 (attr_mask & IB_QP_CUR_STATE) ||
3030 (attr_mask & IB_QP_MIN_RNR_TIMER)) {
3031 dev_err(dev, "RTR2RTS attr_mask error\n");
3035 context->rx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
3037 roce_set_field(context->qpc_bytes_120,
3038 QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
3039 QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
3040 upper_32_bits(sq_ba));
3042 roce_set_field(context->qpc_bytes_124,
3043 QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
3044 QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0);
3045 roce_set_field(context->qpc_bytes_124,
3046 QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M,
3047 QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0);
3049 roce_set_field(context->qpc_bytes_128,
3050 QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M,
3051 QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S,
3053 roce_set_bit(context->qpc_bytes_128,
3054 QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0);
3055 roce_set_field(context->qpc_bytes_128,
3056 QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M,
3057 QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S,
3059 roce_set_bit(context->qpc_bytes_128,
3060 QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0);
3062 roce_set_field(context->qpc_bytes_132,
3063 QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M,
3064 QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0);
3065 roce_set_field(context->qpc_bytes_132,
3066 QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M,
3067 QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0);
3069 roce_set_field(context->qpc_bytes_136,
3070 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M,
3071 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S,
3073 roce_set_field(context->qpc_bytes_136,
3074 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M,
3075 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S,
3078 roce_set_field(context->qpc_bytes_140,
3079 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M,
3080 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S,
3081 (attr->sq_psn >> SQ_PSN_SHIFT));
3082 roce_set_field(context->qpc_bytes_140,
3083 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M,
3084 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0);
3085 roce_set_bit(context->qpc_bytes_140,
3086 QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
3088 roce_set_field(context->qpc_bytes_148,
3089 QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
3090 QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
3091 roce_set_field(context->qpc_bytes_148,
3092 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
3093 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
3095 roce_set_field(context->qpc_bytes_148,
3096 QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
3097 QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
3099 roce_set_field(context->qpc_bytes_148,
3100 QP_CONTEXT_QPC_BYTES_148_LSN_M,
3101 QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
3103 context->rnr_retry = 0;
3105 roce_set_field(context->qpc_bytes_156,
3106 QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
3107 QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
3109 if (attr->timeout < 0x12) {
3110 dev_info(dev, "ack timeout value (0x%x) must be bigger than 0x12.\n",
3112 roce_set_field(context->qpc_bytes_156,
3113 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3114 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3117 roce_set_field(context->qpc_bytes_156,
3118 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3119 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3122 roce_set_field(context->qpc_bytes_156,
3123 QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
3124 QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
3126 roce_set_field(context->qpc_bytes_156,
3127 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
3128 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
3130 roce_set_field(context->qpc_bytes_156,
3131 QP_CONTEXT_QPC_BYTES_156_SL_M,
3132 QP_CONTEXT_QPC_BYTES_156_SL_S,
3133 rdma_ah_get_sl(&attr->ah_attr));
3134 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3135 roce_set_field(context->qpc_bytes_156,
3136 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
3137 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S,
3138 ilog2((unsigned int)attr->max_rd_atomic));
3139 roce_set_field(context->qpc_bytes_156,
3140 QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M,
3141 QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0);
3142 context->pkt_use_len = 0;
3144 roce_set_field(context->qpc_bytes_164,
3145 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
3146 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn);
3147 roce_set_field(context->qpc_bytes_164,
3148 QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M,
3149 QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0);
3151 roce_set_field(context->qpc_bytes_168,
3152 QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M,
3153 QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S,
3155 roce_set_field(context->qpc_bytes_168,
3156 QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M,
3157 QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0);
3158 roce_set_field(context->qpc_bytes_168,
3159 QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M,
3160 QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0);
3161 roce_set_bit(context->qpc_bytes_168,
3162 QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0);
3163 roce_set_bit(context->qpc_bytes_168,
3164 QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0);
3165 roce_set_bit(context->qpc_bytes_168,
3166 QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0);
3167 context->sge_use_len = 0;
3169 roce_set_field(context->qpc_bytes_176,
3170 QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M,
3171 QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0);
3172 roce_set_field(context->qpc_bytes_176,
3173 QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M,
3174 QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S,
3176 roce_set_field(context->qpc_bytes_180,
3177 QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M,
3178 QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0);
3179 roce_set_field(context->qpc_bytes_180,
3180 QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
3181 QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
3183 context->tx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
3185 roce_set_field(context->qpc_bytes_188,
3186 QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
3187 QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
3188 upper_32_bits(sq_ba));
3189 roce_set_bit(context->qpc_bytes_188,
3190 QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
3191 roce_set_field(context->qpc_bytes_188,
3192 QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
3193 QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
3197 /* Every state transition must update the QP state field */
3198 roce_set_field(context->qpc_bytes_144,
3199 QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
3200 QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
3202 /* SW passes the context to HW */
3203 ret = hns_roce_v1_qp_modify(hr_dev, to_hns_roce_state(cur_state),
3204 to_hns_roce_state(new_state), context,
3207 dev_err(dev, "hns_roce_qp_modify failed\n");
3212 * The driver uses rst2init instead of init2init, so HW needs to
3213 * flush the RQ head by doorbell again.
3215 if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
3216 /* Memory barrier */
3219 roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
3220 RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
3221 roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
3222 RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
3223 roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
3224 RQ_DOORBELL_U32_8_CMD_S, 1);
3225 roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
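/*
 * doorbell[0] carries the RQ head, doorbell[1] the QPN, command and
 * HW sync bit; ringing it makes hardware pick up the RQ head again.
 */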
3227 if (ibqp->uobject) {
3228 hr_qp->rq.db_reg_l = hr_dev->reg_base +
3229 hr_dev->odb_offset +
3230 DB_REG_OFFSET * hr_dev->priv_uar.index;
3233 hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
3236 hr_qp->state = new_state;
3238 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3239 hr_qp->resp_depth = attr->max_dest_rd_atomic;
3240 if (attr_mask & IB_QP_PORT) {
3241 hr_qp->port = attr->port_num - 1;
3242 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
3245 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
3246 hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
3247 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
3248 if (ibqp->send_cq != ibqp->recv_cq)
3249 hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
3262 static int hns_roce_v1_modify_qp(struct ib_qp *ibqp,
3263 const struct ib_qp_attr *attr, int attr_mask,
3264 enum ib_qp_state cur_state,
3265 enum ib_qp_state new_state)
3268 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
3269 return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state,
3272 return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state,
3276 static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state)
3279 case HNS_ROCE_QP_STATE_RST:
3280 return IB_QPS_RESET;
3281 case HNS_ROCE_QP_STATE_INIT:
3283 case HNS_ROCE_QP_STATE_RTR:
3285 case HNS_ROCE_QP_STATE_RTS:
3287 case HNS_ROCE_QP_STATE_SQD:
3289 case HNS_ROCE_QP_STATE_ERR:
3296 static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
3297 struct hns_roce_qp *hr_qp,
3298 struct hns_roce_qp_context *hr_context)
3300 struct hns_roce_cmd_mailbox *mailbox;
3303 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3304 if (IS_ERR(mailbox))
3305 return PTR_ERR(mailbox);
3307 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
3308 HNS_ROCE_CMD_QUERY_QP,
3309 HNS_ROCE_CMD_TIMEOUT_MSECS);
3311 memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
3313 dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n");
3315 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3320 static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3322 struct ib_qp_init_attr *qp_init_attr)
3324 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3325 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3326 struct hns_roce_sqp_context context;
3329 mutex_lock(&hr_qp->mutex);
3331 if (hr_qp->state == IB_QPS_RESET) {
3332 qp_attr->qp_state = IB_QPS_RESET;
3336 addr = ROCEE_QP1C_CFG0_0_REG +
3337 hr_qp->port * sizeof(struct hns_roce_sqp_context);
3338 context.qp1c_bytes_4 = cpu_to_le32(roce_read(hr_dev, addr));
3339 context.sq_rq_bt_l = cpu_to_le32(roce_read(hr_dev, addr + 1));
3340 context.qp1c_bytes_12 = cpu_to_le32(roce_read(hr_dev, addr + 2));
3341 context.qp1c_bytes_16 = cpu_to_le32(roce_read(hr_dev, addr + 3));
3342 context.qp1c_bytes_20 = cpu_to_le32(roce_read(hr_dev, addr + 4));
3343 context.cur_rq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 5));
3344 context.qp1c_bytes_28 = cpu_to_le32(roce_read(hr_dev, addr + 6));
3345 context.qp1c_bytes_32 = cpu_to_le32(roce_read(hr_dev, addr + 7));
3346 context.cur_sq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 8));
3347 context.qp1c_bytes_40 = cpu_to_le32(roce_read(hr_dev, addr + 9));
3349 hr_qp->state = roce_get_field(context.qp1c_bytes_4,
3350 QP1C_BYTES_4_QP_STATE_M,
3351 QP1C_BYTES_4_QP_STATE_S);
3352 qp_attr->qp_state = hr_qp->state;
3353 qp_attr->path_mtu = IB_MTU_256;
3354 qp_attr->path_mig_state = IB_MIG_ARMED;
3355 qp_attr->qkey = QKEY_VAL;
3356 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
3357 qp_attr->rq_psn = 0;
3358 qp_attr->sq_psn = 0;
3359 qp_attr->dest_qp_num = 1;
3360 qp_attr->qp_access_flags = 6;
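/* 6 = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ */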
3362 qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
3363 QP1C_BYTES_20_PKEY_IDX_M,
3364 QP1C_BYTES_20_PKEY_IDX_S);
3365 qp_attr->port_num = hr_qp->port + 1;
3366 qp_attr->sq_draining = 0;
3367 qp_attr->max_rd_atomic = 0;
3368 qp_attr->max_dest_rd_atomic = 0;
3369 qp_attr->min_rnr_timer = 0;
3370 qp_attr->timeout = 0;
3371 qp_attr->retry_cnt = 0;
3372 qp_attr->rnr_retry = 0;
3373 qp_attr->alt_timeout = 0;
3376 qp_attr->cur_qp_state = qp_attr->qp_state;
3377 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3378 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3379 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3380 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3381 qp_attr->cap.max_inline_data = 0;
3382 qp_init_attr->cap = qp_attr->cap;
3383 qp_init_attr->create_flags = 0;
3385 mutex_unlock(&hr_qp->mutex);
3390 static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3392 struct ib_qp_init_attr *qp_init_attr)
3394 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3395 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3396 struct device *dev = &hr_dev->pdev->dev;
3397 struct hns_roce_qp_context *context;
3398 int tmp_qp_state = 0;
3402 context = kzalloc(sizeof(*context), GFP_KERNEL);
3406 memset(qp_attr, 0, sizeof(*qp_attr));
3407 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
3409 mutex_lock(&hr_qp->mutex);
3411 if (hr_qp->state == IB_QPS_RESET) {
3412 qp_attr->qp_state = IB_QPS_RESET;
3416 ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context);
3418 dev_err(dev, "query qpc error\n");
3423 state = roce_get_field(context->qpc_bytes_144,
3424 QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
3425 QP_CONTEXT_QPC_BYTES_144_QP_STATE_S);
3426 tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state);
3427 if (tmp_qp_state == -1) {
3428 dev_err(dev, "to_ib_qp_state error\n");
3432 hr_qp->state = (u8)tmp_qp_state;
3433 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
3434 qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48,
3435 QP_CONTEXT_QPC_BYTES_48_MTU_M,
3436 QP_CONTEXT_QPC_BYTES_48_MTU_S);
3437 qp_attr->path_mig_state = IB_MIG_ARMED;
3438 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
3439 if (hr_qp->ibqp.qp_type == IB_QPT_UD)
3440 qp_attr->qkey = QKEY_VAL;
3442 qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88,
3443 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
3444 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S);
3445 qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164,
3446 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
3447 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S);
3448 qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36,
3449 QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
3450 QP_CONTEXT_QPC_BYTES_36_DEST_QP_S);
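/*
 * Rebuild qp_access_flags from the QPC enable bits: RDMA read maps to
 * IB_ACCESS_REMOTE_READ (bit 2), RDMA write to IB_ACCESS_REMOTE_WRITE
 * (bit 1) and atomic to IB_ACCESS_REMOTE_ATOMIC (bit 3).
 */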
3451 qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4,
3452 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) |
3453 ((roce_get_bit(context->qpc_bytes_4,
3454 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) |
3455 ((roce_get_bit(context->qpc_bytes_4,
3456 QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);
3458 if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
3459 hr_qp->ibqp.qp_type == IB_QPT_UC) {
3460 struct ib_global_route *grh =
3461 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
3463 rdma_ah_set_sl(&qp_attr->ah_attr,
3464 roce_get_field(context->qpc_bytes_156,
3465 QP_CONTEXT_QPC_BYTES_156_SL_M,
3466 QP_CONTEXT_QPC_BYTES_156_SL_S));
3467 rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
3469 roce_get_field(context->qpc_bytes_48,
3470 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
3471 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S);
3473 roce_get_field(context->qpc_bytes_36,
3474 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
3475 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S);
3477 roce_get_field(context->qpc_bytes_44,
3478 QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
3479 QP_CONTEXT_QPC_BYTES_44_HOPLMT_S);
3480 grh->traffic_class =
3481 roce_get_field(context->qpc_bytes_48,
3482 QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
3483 QP_CONTEXT_QPC_BYTES_48_TCLASS_S);
3485 memcpy(grh->dgid.raw, context->dgid,
3486 sizeof(grh->dgid.raw));
3489 qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
3490 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
3491 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
3492 qp_attr->port_num = hr_qp->port + 1;
3493 qp_attr->sq_draining = 0;
3494 qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156,
3495 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
3496 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
3497 qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32,
3498 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
3499 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
3500 qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24,
3501 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
3502 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S));
3503 qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156,
3504 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3505 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S));
3506 qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
3507 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
3508 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
3509 qp_attr->rnr_retry = (u8)le32_to_cpu(context->rnr_retry);
3512 qp_attr->cur_qp_state = qp_attr->qp_state;
3513 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3514 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3516 if (!ibqp->uobject) {
3517 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3518 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3520 qp_attr->cap.max_send_wr = 0;
3521 qp_attr->cap.max_send_sge = 0;
3524 qp_init_attr->cap = qp_attr->cap;
3527 mutex_unlock(&hr_qp->mutex);
3532 static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3534 struct ib_qp_init_attr *qp_init_attr)
3536 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3538 return hr_qp->doorbell_qpn <= 1 ?
3539 hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) :
3540 hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
3543 int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
3545 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3546 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3547 struct hns_roce_cq *send_cq, *recv_cq;
3550 ret = hns_roce_v1_modify_qp(ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET);
3554 send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
3555 recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
3557 hns_roce_lock_cqs(send_cq, recv_cq);
3560 __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn,
3562 to_hr_srq(hr_qp->ibqp.srq) :
3565 if (send_cq && send_cq != recv_cq)
3566 __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
3568 hns_roce_qp_remove(hr_dev, hr_qp);
3569 hns_roce_unlock_cqs(send_cq, recv_cq);
3571 hns_roce_qp_destroy(hr_dev, hr_qp, udata);
3576 static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
3578 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3579 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3580 struct device *dev = &hr_dev->pdev->dev;
3586 * Before freeing the cq buffer, we need to ensure that all outstanding
3587 * CQEs have been written, by checking the CQE counter.
3589 cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
3591 if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
3592 HNS_ROCE_CQE_WCMD_EMPTY_BIT)
3595 cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
3596 if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
3599 msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
3600 if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
3601 dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
3609 static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
3611 roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
3612 (req_not << eq->log_entries), eq->doorbell);
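/*
 * The EQ doorbell packs the consumer index into the low bits and the
 * rearm request just above the index width (log_entries).
 */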
3615 static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
3616 struct hns_roce_aeqe *aeqe, int qpn)
3618 struct device *dev = &hr_dev->pdev->dev;
3620 dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
3621 switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3622 HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3623 case HNS_ROCE_LWQCE_QPC_ERROR:
3624 dev_warn(dev, "QP %d, QPC error.\n", qpn);
3626 case HNS_ROCE_LWQCE_MTU_ERROR:
3627 dev_warn(dev, "QP %d, MTU error.\n", qpn);
3629 case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
3630 dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
3632 case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
3633 dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
3635 case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
3636 dev_warn(dev, "QP %d, WQE shift error\n", qpn);
3638 case HNS_ROCE_LWQCE_SL_ERROR:
3639 dev_warn(dev, "QP %d, SL error.\n", qpn);
3641 case HNS_ROCE_LWQCE_PORT_ERROR:
3642 dev_warn(dev, "QP %d, port error.\n", qpn);
3649 static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
3650 struct hns_roce_aeqe *aeqe,
3653 struct device *dev = &hr_dev->pdev->dev;
3655 dev_warn(dev, "Local Access Violation Work Queue Error.\n");
3656 switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3657 HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3658 case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
3659 dev_warn(dev, "QP %d, R_key violation.\n", qpn);
3661 case HNS_ROCE_LAVWQE_LENGTH_ERROR:
3662 dev_warn(dev, "QP %d, length error.\n", qpn);
3664 case HNS_ROCE_LAVWQE_VA_ERROR:
3665 dev_warn(dev, "QP %d, VA error.\n", qpn);
3667 case HNS_ROCE_LAVWQE_PD_ERROR:
3668 dev_err(dev, "QP %d, PD error.\n", qpn);
3670 case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
3671 dev_warn(dev, "QP %d, rw acc error.\n", qpn);
3673 case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
3674 dev_warn(dev, "QP %d, key state error.\n", qpn);
3676 case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
3677 dev_warn(dev, "QP %d, MR operation error.\n", qpn);
3684 static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev,
3685 struct hns_roce_aeqe *aeqe,
3688 struct device *dev = &hr_dev->pdev->dev;
3692 qpn = roce_get_field(aeqe->event.qp_event.qp,
3693 HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
3694 HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
3695 phy_port = roce_get_field(aeqe->event.qp_event.qp,
3696 HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
3697 HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
3699 qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
3701 switch (event_type) {
3702 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
3703 dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
3704 "QP %d, phy_port %d.\n", qpn, phy_port);
3706 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
3707 hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn);
3709 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
3710 hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn);
3716 hns_roce_qp_event(hr_dev, qpn, event_type);
3719 static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
3720 struct hns_roce_aeqe *aeqe,
3723 struct device *dev = &hr_dev->pdev->dev;
3726 cqn = roce_get_field(aeqe->event.cq_event.cq,
3727 HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
3728 HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S);
3730 switch (event_type) {
3731 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
3732 dev_warn(dev, "CQ 0x%x access err.\n", cqn);
3734 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
3735 dev_warn(dev, "CQ 0x%x overflow\n", cqn);
3737 case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
3738 dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
3744 hns_roce_cq_event(hr_dev, cqn, event_type);
3747 static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev,
3748 struct hns_roce_aeqe *aeqe)
3750 struct device *dev = &hr_dev->pdev->dev;
3752 switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3753 HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3754 case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
3755 dev_warn(dev, "SDB overflow.\n");
3757 case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
3758 dev_warn(dev, "SDB almost overflow.\n");
3760 case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
3761 dev_warn(dev, "SDB almost empty.\n");
3763 case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
3764 dev_warn(dev, "ODB overflow.\n");
3766 case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
3767 dev_warn(dev, "ODB almost overflow.\n");
3769 case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
3770 dev_warn(dev, "ODB almost empty.\n");
3777 static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry)
3779 unsigned long off = (entry & (eq->entries - 1)) *
3780 HNS_ROCE_AEQ_ENTRY_SIZE;
3782 return (struct hns_roce_aeqe *)((u8 *)
3783 (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
3784 off % HNS_ROCE_BA_SIZE);
3787 static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq)
3789 struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index);
3791 return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
3792 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
3795 static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
3796 struct hns_roce_eq *eq)
3798 struct device *dev = &hr_dev->pdev->dev;
3799 struct hns_roce_aeqe *aeqe;
3800 int aeqes_found = 0;
3803 while ((aeqe = next_aeqe_sw_v1(eq))) {
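/*
 * next_aeqe_sw_v1() only returns entries whose owner bit matches the
 * current wrap phase of cons_index, i.e. entries hardware has finished
 * writing.
 */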
3805 /* Make sure we read the AEQ entry after we have checked the ownership bit */
3810 dev_dbg(dev, "aeqe = %pK, aeqe->asyn.event_type = 0x%lx\n",
3812 roce_get_field(aeqe->asyn,
3813 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
3814 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
3815 event_type = roce_get_field(aeqe->asyn,
3816 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
3817 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
3818 switch (event_type) {
3819 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
3820 dev_warn(dev, "PATH MIG not supported\n");
3822 case HNS_ROCE_EVENT_TYPE_COMM_EST:
3823 dev_warn(dev, "COMMUNICATION established\n");
3825 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
3826 dev_warn(dev, "SQ DRAINED not supported\n");
3828 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
3829 dev_warn(dev, "PATH MIG failed\n");
3831 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
3832 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
3833 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
3834 hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type);
3836 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
3837 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
3838 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
3839 dev_warn(dev, "SRQ not supported!\n");
3841 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
3842 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
3843 case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
3844 hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type);
3846 case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
3847 dev_warn(dev, "port change.\n");
3849 case HNS_ROCE_EVENT_TYPE_MB:
3850 hns_roce_cmd_event(hr_dev,
3851 le16_to_cpu(aeqe->event.cmd.token),
3852 aeqe->event.cmd.status,
3853 le64_to_cpu(aeqe->event.cmd.out_param
3856 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
3857 hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
3859 case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
3860 dev_warn(dev, "CEQ 0x%lx overflow.\n",
3861 roce_get_field(aeqe->event.ce_event.ceqe,
3862 HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
3863 HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
3866 dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
3867 event_type, eq->eqn, eq->cons_index);
3874 if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1)
3878 set_eq_cons_index_v1(eq, 0);
3883 static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry)
3885 unsigned long off = (entry & (eq->entries - 1)) *
3886 HNS_ROCE_CEQ_ENTRY_SIZE;
3888 return (struct hns_roce_ceqe *)((u8 *)
3889 (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
3890 off % HNS_ROCE_BA_SIZE);
3893 static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq)
3895 struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index);
3897 return (!!(roce_get_bit(ceqe->comp,
3898 HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
3899 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
3902 static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
3903 struct hns_roce_eq *eq)
3905 struct hns_roce_ceqe *ceqe;
3906 int ceqes_found = 0;
3909 while ((ceqe = next_ceqe_sw_v1(eq))) {
3911 /* Make sure we read the CEQ entry after we have checked the ownership bit */
3916 cqn = roce_get_field(ceqe->comp,
3917 HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
3918 HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
3919 hns_roce_cq_completion(hr_dev, cqn);
3924 if (eq->cons_index >
3925 EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1)
3929 set_eq_cons_index_v1(eq, 0);
3934 static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr)
3936 struct hns_roce_eq *eq = eq_ptr;
3937 struct hns_roce_dev *hr_dev = eq->hr_dev;
3940 if (eq->type_flag == HNS_ROCE_CEQ)
3941 /* CEQ irq routine; the CEQ is a pulse irq and does not need to be cleared */
3942 int_work = hns_roce_v1_ceq_int(hr_dev, eq);
3944 /* AEQ irq routine; the AEQ is a pulse irq and does not need to be cleared */
3945 int_work = hns_roce_v1_aeq_int(hr_dev, eq);
3947 return IRQ_RETVAL(int_work);
3950 static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
3952 struct hns_roce_dev *hr_dev = dev_id;
3953 struct device *dev = &hr_dev->pdev->dev;
3965 * Abnormal interrupt:
3966 * AEQ overflow, ECC multi-bit error and CEQ overflow must be cleared:
3967 * mask the irq, clear the irq state, then cancel the mask.
3969 aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
3970 tmp = cpu_to_le32(aeshift_val);
3973 if (roce_get_bit(tmp,
3974 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
3975 dev_warn(dev, "AEQ overflow!\n");
3978 caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
3979 tmp = cpu_to_le32(caepaemask_val);
3980 roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
3981 HNS_ROCE_INT_MASK_ENABLE);
3982 caepaemask_val = le32_to_cpu(tmp);
3983 roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
3985 /* Clear int state (INT_WC: write 1 to clear) */
3986 caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
3987 tmp = cpu_to_le32(caepaest_val);
3988 roce_set_bit(tmp, ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
3989 caepaest_val = le32_to_cpu(tmp);
3990 roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
3993 caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
3994 tmp = cpu_to_le32(caepaemask_val);
3995 roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
3996 HNS_ROCE_INT_MASK_DISABLE);
3997 caepaemask_val = le32_to_cpu(tmp);
3998 roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
4001 /* CEQ almost overflow */
4002 for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
4003 ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
4004 i * CEQ_REG_OFFSET);
4005 tmp = cpu_to_le32(ceshift_val);
4007 if (roce_get_bit(tmp,
4008 ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
4009 dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
4013 cemask_val = roce_read(hr_dev,
4014 ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4015 i * CEQ_REG_OFFSET);
4016 tmp = cpu_to_le32(cemask_val);
4018 ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
4019 HNS_ROCE_INT_MASK_ENABLE);
4020 cemask_val = le32_to_cpu(tmp);
4021 roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4022 i * CEQ_REG_OFFSET, cemask_val);
4024 /* Clear int state(INT_WC : write 1 clear) */
4025 cealmovf_val = roce_read(hr_dev,
4026 ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
4027 i * CEQ_REG_OFFSET);
4028 tmp = cpu_to_le32(cealmovf_val);
4030 ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
4032 cealmovf_val = le32_to_cpu(tmp);
4033 roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
4034 i * CEQ_REG_OFFSET, cealmovf_val);
4037 cemask_val = roce_read(hr_dev,
4038 ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4039 i * CEQ_REG_OFFSET);
4040 tmp = cpu_to_le32(cemask_val);
4042 ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
4043 HNS_ROCE_INT_MASK_DISABLE);
4044 cemask_val = le32_to_cpu(tmp);
4045 roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4046 i * CEQ_REG_OFFSET, cemask_val);
4050 /* ECC multi-bit error alarm */
4051 dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
4052 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
4053 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
4054 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));
4056 dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
4057 roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
4058 roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
4059 roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));
4061 return IRQ_RETVAL(int_work);
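/*
 * Program the AEQ and per-CEQ interrupt mask registers with masken (0),
 * clearing the mask bits for the AE irq, the AEQ overflow alarm and every
 * completion event vector.
 */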
static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
{
        u32 aemask_val;
        int masken = 0;
        __le32 tmp;
        int i;

        /* AEQ INT */
        aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
        tmp = cpu_to_le32(aemask_val);
        roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
                     masken);
        roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
        aemask_val = le32_to_cpu(tmp);
        roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);

        /* CEQ INT */
        for (i = 0; i < hr_dev->caps.num_comp_vectors; i++)
                roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
                           i * CEQ_REG_OFFSET, masken);
}

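/* Free the coherent buffer pages backing an event queue. */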
static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev,
                                struct hns_roce_eq *eq)
{
        int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
                      HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
        int i;

        if (!eq->buf_list)
                return;

        for (i = 0; i < npages; ++i)
                dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
                                  eq->buf_list[i].buf, eq->buf_list[i].map);

        kfree(eq->buf_list);
}

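/* Set the EQ state field in the EQ context to valid or invalid. */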
static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
                                  int enable_flag)
{
        void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
        __le32 tmp;
        u32 val;

        val = readl(eqc);
        tmp = cpu_to_le32(val);

        if (enable_flag)
                roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
                               ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
                               HNS_ROCE_EQ_STAT_VALID);
        else
                roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
                               ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
                               HNS_ROCE_EQ_STAT_INVALID);

        val = le32_to_cpu(tmp);
        writel(val, eqc);
}

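/*
 * Allocate the coherent buffer for an event queue and program its context
 * registers: state/shift, buffer base address, producer and consumer index.
 */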
static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
                                 struct hns_roce_eq *eq)
{
        void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
        struct device *dev = &hr_dev->pdev->dev;
        dma_addr_t tmp_dma_addr;
        u32 eqconsindx_val = 0;
        u32 eqcuridx_val = 0;
        u32 eqshift_val = 0;
        __le32 tmp2 = 0;
        __le32 tmp1 = 0;
        __le32 tmp = 0;
        int num_bas;
        int ret;
        int i;

        num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
                   HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;

        if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
                dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n",
                        (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
                        num_bas);
                return -EINVAL;
        }

        eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
        if (!eq->buf_list)
                return -ENOMEM;

        for (i = 0; i < num_bas; ++i) {
                eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
                                                         &tmp_dma_addr,
                                                         GFP_KERNEL);
                if (!eq->buf_list[i].buf) {
                        ret = -ENOMEM;
                        goto err_out_free_pages;
                }

                eq->buf_list[i].map = tmp_dma_addr;
        }
        eq->cons_index = 0;

        roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
                       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
                       HNS_ROCE_EQ_STAT_INVALID);
        roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
                       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
                       eq->log_entries);
        eqshift_val = le32_to_cpu(tmp);
        writel(eqshift_val, eqc);

        /* Configure eq extended address 12~44bit */
        writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);

        /*
         * Configure eq extended address 45~49 bit.
         * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
         * using 4K page, and shift more 32 because of
         * calculating the high 32 bit value evaluated to hardware.
         */
        roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
                       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
                       eq->buf_list[0].map >> 44);
        roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
                       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
        eqcuridx_val = le32_to_cpu(tmp1);
        writel(eqcuridx_val, eqc + 8);

        /* Configure eq consumer index */
        roce_set_field(tmp2, ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
                       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
        eqconsindx_val = le32_to_cpu(tmp2);
        writel(eqconsindx_val, eqc + 0xc);

        return 0;

err_out_free_pages:
        for (i -= 1; i >= 0; i--)
                dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
                                  eq->buf_list[i].map);

        kfree(eq->buf_list);
        return ret;
}

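/*
 * Set up the EQ table: describe the CEQs and the AEQ, allocate their
 * buffers, request the MSI-X vectors (including the abnormal-event vector)
 * and finally enable the event queues.
 */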
static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_eq *eq;
        int irq_num;
        int eq_num;
        int ret;
        int i, j;

        eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
        irq_num = eq_num + hr_dev->caps.num_other_vectors;

        eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
        if (!eq_table->eq)
                return -ENOMEM;

        eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
                                     GFP_KERNEL);
        if (!eq_table->eqc_base) {
                ret = -ENOMEM;
                goto err_eqc_base_alloc_fail;
        }

        for (i = 0; i < eq_num; i++) {
                eq = &eq_table->eq[i];
                eq->hr_dev = hr_dev;
                eq->eqn = i;
                eq->irq = hr_dev->irq[i];
                eq->log_page_size = PAGE_SHIFT;

                if (i < hr_dev->caps.num_comp_vectors) {
                        /* CEQ */
                        eq_table->eqc_base[i] = hr_dev->reg_base +
                                                ROCEE_CAEP_CEQC_SHIFT_0_REG +
                                                CEQ_REG_OFFSET * i;
                        eq->type_flag = HNS_ROCE_CEQ;
                        eq->doorbell = hr_dev->reg_base +
                                       ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
                                       CEQ_REG_OFFSET * i;
                        eq->entries = hr_dev->caps.ceqe_depth;
                        eq->log_entries = ilog2(eq->entries);
                        eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
                } else {
                        /* AEQ */
                        eq_table->eqc_base[i] = hr_dev->reg_base +
                                                ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
                        eq->type_flag = HNS_ROCE_AEQ;
                        eq->doorbell = hr_dev->reg_base +
                                       ROCEE_CAEP_AEQE_CONS_IDX_REG;
                        eq->entries = hr_dev->caps.aeqe_depth;
                        eq->log_entries = ilog2(eq->entries);
                        eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
                }
        }

        hns_roce_v1_int_mask_enable(hr_dev);

        /* Configure ce int interval */
        roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
                   HNS_ROCE_CEQ_DEFAULT_INTERVAL);

        /* Configure ce int burst num */
        roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
                   HNS_ROCE_CEQ_DEFAULT_BURST_NUM);

        for (i = 0; i < eq_num; i++) {
                ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]);
                if (ret) {
                        dev_err(dev, "eq create failed\n");
                        goto err_create_eq_fail;
                }
        }

        for (j = 0; j < irq_num; j++) {
                if (j < eq_num)
                        ret = request_irq(hr_dev->irq[j],
                                          hns_roce_v1_msix_interrupt_eq, 0,
                                          hr_dev->irq_names[j],
                                          &eq_table->eq[j]);
                else
                        ret = request_irq(hr_dev->irq[j],
                                          hns_roce_v1_msix_interrupt_abn, 0,
                                          hr_dev->irq_names[j], hr_dev);
                if (ret) {
                        dev_err(dev, "request irq error!\n");
                        goto err_request_irq_fail;
                }
        }

        for (i = 0; i < eq_num; i++)
                hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE);

        return 0;

err_request_irq_fail:
        for (j -= 1; j >= 0; j--)
                free_irq(hr_dev->irq[j], &eq_table->eq[j]);

err_create_eq_fail:
        for (i -= 1; i >= 0; i--)
                hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);

        kfree(eq_table->eqc_base);

err_eqc_base_alloc_fail:
        kfree(eq_table->eq);

        return ret;
}

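/* Tear down the EQ table: disable every EQ, release its irq and buffers. */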
static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
        int irq_num;
        int eq_num;
        int i;

        eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
        irq_num = eq_num + hr_dev->caps.num_other_vectors;
        for (i = 0; i < eq_num; i++) {
                /* Disable EQ */
                hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE);

                free_irq(hr_dev->irq[i], &eq_table->eq[i]);

                hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
        }
        for (i = eq_num; i < irq_num; i++)
                free_irq(hr_dev->irq[i], hr_dev);

        kfree(eq_table->eqc_base);
        kfree(eq_table->eq);
}

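/* Verb and hardware operation tables registered for HIP06 (hw v1). */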
static const struct ib_device_ops hns_roce_v1_dev_ops = {
        .destroy_qp = hns_roce_v1_destroy_qp,
        .modify_cq = hns_roce_v1_modify_cq,
        .poll_cq = hns_roce_v1_poll_cq,
        .post_recv = hns_roce_v1_post_recv,
        .post_send = hns_roce_v1_post_send,
        .query_qp = hns_roce_v1_query_qp,
        .req_notify_cq = hns_roce_v1_req_notify_cq,
};

static const struct hns_roce_hw hns_roce_hw_v1 = {
        .reset = hns_roce_v1_reset,
        .hw_profile = hns_roce_v1_profile,
        .hw_init = hns_roce_v1_init,
        .hw_exit = hns_roce_v1_exit,
        .post_mbox = hns_roce_v1_post_mbox,
        .chk_mbox = hns_roce_v1_chk_mbox,
        .set_gid = hns_roce_v1_set_gid,
        .set_mac = hns_roce_v1_set_mac,
        .set_mtu = hns_roce_v1_set_mtu,
        .write_mtpt = hns_roce_v1_write_mtpt,
        .write_cqc = hns_roce_v1_write_cqc,
        .modify_cq = hns_roce_v1_modify_cq,
        .clear_hem = hns_roce_v1_clear_hem,
        .modify_qp = hns_roce_v1_modify_qp,
        .query_qp = hns_roce_v1_query_qp,
        .destroy_qp = hns_roce_v1_destroy_qp,
        .post_send = hns_roce_v1_post_send,
        .post_recv = hns_roce_v1_post_recv,
        .req_notify_cq = hns_roce_v1_req_notify_cq,
        .poll_cq = hns_roce_v1_poll_cq,
        .dereg_mr = hns_roce_v1_dereg_mr,
        .destroy_cq = hns_roce_v1_destroy_cq,
        .init_eq = hns_roce_v1_init_eq_table,
        .cleanup_eq = hns_roce_v1_cleanup_eq_table,
        .hns_roce_dev_ops = &hns_roce_v1_dev_ops,
};

static const struct of_device_id hns_roce_of_match[] = {
        { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
        {},
};
MODULE_DEVICE_TABLE(of, hns_roce_of_match);

static const struct acpi_device_id hns_roce_acpi_match[] = {
        { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
        {},
};
MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);

static struct
platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
{
        struct device *dev;

        /* get the 'device' corresponding to the matching 'fwnode' */
        dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
        /* get the platform device */
        return dev ? to_platform_device(dev) : NULL;
}

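/*
 * Read the per-board configuration from DT or ACPI: hardware ops, register
 * base, node GUID, the associated ethernet ports and interrupt resources.
 */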
static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
{
        struct device *dev = &hr_dev->pdev->dev;
        struct platform_device *pdev = NULL;
        struct net_device *netdev = NULL;
        struct device_node *net_node;
        int port_cnt = 0;
        u8 phy_port;
        int ret;
        int i;

        /* check if we are compatible with the underlying SoC */
        if (dev_of_node(dev)) {
                const struct of_device_id *of_id;

                of_id = of_match_node(hns_roce_of_match, dev->of_node);
                if (!of_id) {
                        dev_err(dev, "device is not compatible!\n");
                        return -ENXIO;
                }
                hr_dev->hw = (const struct hns_roce_hw *)of_id->data;
                if (!hr_dev->hw) {
                        dev_err(dev, "couldn't get H/W specific DT data!\n");
                        return -ENXIO;
                }
        } else if (is_acpi_device_node(dev->fwnode)) {
                const struct acpi_device_id *acpi_id;

                acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
                if (!acpi_id) {
                        dev_err(dev, "device is not compatible!\n");
                        return -ENXIO;
                }
                hr_dev->hw = (const struct hns_roce_hw *) acpi_id->driver_data;
                if (!hr_dev->hw) {
                        dev_err(dev, "couldn't get H/W specific ACPI data!\n");
                        return -ENXIO;
                }
        } else {
                dev_err(dev, "can't read compatibility data from DT or ACPI\n");
                return -ENXIO;
        }

        /* get the mapped register base address */
        hr_dev->reg_base = devm_platform_ioremap_resource(hr_dev->pdev, 0);
        if (IS_ERR(hr_dev->reg_base))
                return PTR_ERR(hr_dev->reg_base);

        /* read the node_guid of IB device from the DT or ACPI */
        ret = device_property_read_u8_array(dev, "node-guid",
                                            (u8 *)&hr_dev->ib_dev.node_guid,
                                            GUID_LEN);
        if (ret) {
                dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
                return ret;
        }

        /* get the RoCE associated ethernet ports or netdevices */
        for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
                if (dev_of_node(dev)) {
                        net_node = of_parse_phandle(dev->of_node, "eth-handle",
                                                    i);
                        if (!net_node)
                                continue;
                        pdev = of_find_device_by_node(net_node);
                } else if (is_acpi_device_node(dev->fwnode)) {
                        struct fwnode_reference_args args;

                        ret = acpi_node_get_property_reference(dev->fwnode,
                                                               "eth-handle",
                                                               i, &args);
                        if (ret)
                                continue;
                        pdev = hns_roce_find_pdev(args.fwnode);
                } else {
                        dev_err(dev, "cannot read data from DT or ACPI\n");
                        return -ENXIO;
                }

                if (pdev) {
                        netdev = platform_get_drvdata(pdev);
                        phy_port = (u8)i;
                        if (netdev) {
                                hr_dev->iboe.netdevs[port_cnt] = netdev;
                                hr_dev->iboe.phy_port[port_cnt] = phy_port;
                        } else {
                                dev_err(dev, "no netdev found with pdev %s\n",
                                        pdev->name);
                                return -ENODEV;
                        }
                        port_cnt++;
                }
        }

        if (port_cnt == 0) {
                dev_err(dev, "unable to get eth-handle for available ports!\n");
                return -EINVAL;
        }
        hr_dev->caps.num_ports = port_cnt;

        /* cmd issue mode: 0 is poll, 1 is event */
        hr_dev->cmd_mod = 1;
        hr_dev->loop_idc = 0;
        hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
        hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;

        /* read the interrupt names from the DT or ACPI */
        ret = device_property_read_string_array(dev, "interrupt-names",
                                                hr_dev->irq_names,
                                                HNS_ROCE_V1_MAX_IRQ_NUM);
        if (ret < 0) {
                dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
                return ret;
        }

        /* fetch the interrupt numbers */
        for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
                hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
                if (hr_dev->irq[i] <= 0)
                        return -EINVAL;
        }

        return 0;
}

/**
 * hns_roce_probe - RoCE driver entrance
 * @pdev: pointer to platform device
 */
static int hns_roce_probe(struct platform_device *pdev)
{
        int ret;
        struct hns_roce_dev *hr_dev;
        struct device *dev = &pdev->dev;

        hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
        if (!hr_dev)
                return -ENOMEM;

        hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL);
        if (!hr_dev->priv) {
                ret = -ENOMEM;
                goto error_failed_kzalloc;
        }

        hr_dev->pdev = pdev;
        hr_dev->dev = dev;
        platform_set_drvdata(pdev, hr_dev);

        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
            dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
                dev_err(dev, "Not usable DMA addressing mode\n");
                ret = -EIO;
                goto error_failed_get_cfg;
        }

        ret = hns_roce_get_cfg(hr_dev);
        if (ret) {
                dev_err(dev, "Get Configuration failed!\n");
                goto error_failed_get_cfg;
        }

        ret = hns_roce_init(hr_dev);
        if (ret) {
                dev_err(dev, "RoCE engine init failed!\n");
                goto error_failed_get_cfg;
        }

        return 0;

error_failed_get_cfg:
        kfree(hr_dev->priv);
error_failed_kzalloc:
        ib_dealloc_device(&hr_dev->ib_dev);
        return ret;
}

/**
 * hns_roce_remove - remove RoCE device
 * @pdev: pointer to platform device
 */
static int hns_roce_remove(struct platform_device *pdev)
{
        struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);

        hns_roce_exit(hr_dev);
        kfree(hr_dev->priv);
        ib_dealloc_device(&hr_dev->ib_dev);

        return 0;
}

static struct platform_driver hns_roce_driver = {
        .probe = hns_roce_probe,
        .remove = hns_roce_remove,
        .driver = {
                .name = DRV_NAME,
                .of_match_table = hns_roce_of_match,
                .acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
        },
};

module_platform_driver(hns_roce_driver);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");