2 * Copyright (c) 2016-2017 Hisilicon Limited.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/acpi.h>
34 #include <linux/etherdevice.h>
35 #include <linux/interrupt.h>
36 #include <linux/kernel.h>
37 #include <linux/types.h>
38 #include <net/addrconf.h>
39 #include <rdma/ib_addr.h>
40 #include <rdma/ib_umem.h>
43 #include "hns_roce_common.h"
44 #include "hns_roce_device.h"
45 #include "hns_roce_cmd.h"
46 #include "hns_roce_hem.h"
47 #include "hns_roce_hw_v2.h"
49 static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
52 dseg->lkey = cpu_to_le32(sg->lkey);
53 dseg->addr = cpu_to_le64(sg->addr);
54 dseg->len = cpu_to_le32(sg->length);
57 static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
58 struct hns_roce_wqe_frmr_seg *fseg,
59 const struct ib_reg_wr *wr)
61 struct hns_roce_mr *mr = to_hr_mr(wr->mr);
63 /* use ib_access_flags */
64 roce_set_bit(rc_sq_wqe->byte_4,
65 V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
66 wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
67 roce_set_bit(rc_sq_wqe->byte_4,
68 V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
69 wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
70 roce_set_bit(rc_sq_wqe->byte_4,
71 V2_RC_FRMR_WQE_BYTE_4_RR_S,
72 wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
73 roce_set_bit(rc_sq_wqe->byte_4,
74 V2_RC_FRMR_WQE_BYTE_4_RW_S,
75 wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
76 roce_set_bit(rc_sq_wqe->byte_4,
77 V2_RC_FRMR_WQE_BYTE_4_LW_S,
78 wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
80 /* Data structure reuse may lead to confusion */
81 rc_sq_wqe->msg_len = cpu_to_le32(mr->pbl_ba & 0xffffffff);
82 rc_sq_wqe->inv_key = cpu_to_le32(mr->pbl_ba >> 32);
84 rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
85 rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
86 rc_sq_wqe->rkey = cpu_to_le32(wr->key);
87 rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
89 fseg->pbl_size = cpu_to_le32(mr->pbl_size);
90 roce_set_field(fseg->mode_buf_pg_sz,
91 V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
92 V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
93 mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
94 roce_set_bit(fseg->mode_buf_pg_sz,
95 V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
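/* Fill the atomic segment of an RC send WQE. For compare-and-swap the
 * swap and compare operands are carried; for fetch-and-add only the
 * add operand is used.
 */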
98 static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
99 const struct ib_atomic_wr *wr)
101 if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
102 aseg->fetchadd_swap_data = cpu_to_le64(wr->swap);
103 aseg->cmp_data = cpu_to_le64(wr->compare_add);
105 aseg->fetchadd_swap_data = cpu_to_le64(wr->compare_add);
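/* Write the SGEs that do not fit into the send WQE itself into the
 * extended SGE area of the QP. The area is indexed by *sge_ind modulo
 * sge_cnt, and a run of entries may have to be split across two buffer
 * pages, hence the first/second page bookkeeping below.
 */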
110 static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
111 unsigned int *sge_ind)
113 struct hns_roce_v2_wqe_data_seg *dseg;
122 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
123 num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
124 extend_sge_num = wr->num_sge - num_in_wqe;
125 sg = wr->sg_list + num_in_wqe;
126 shift = qp->hr_buf.page_shift;
129 * Check whether wr->num_sge SGEs sit in the same page. If not,
130 * calculate how many SGEs fall in the first page and how many in the second
133 dseg = get_send_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
134 fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
136 sizeof(struct hns_roce_v2_wqe_data_seg);
137 if (extend_sge_num > fi_sge_num) {
138 se_sge_num = extend_sge_num - fi_sge_num;
139 for (i = 0; i < fi_sge_num; i++) {
140 set_data_seg_v2(dseg++, sg + i);
143 dseg = get_send_extend_sge(qp,
144 (*sge_ind) & (qp->sge.sge_cnt - 1));
145 for (i = 0; i < se_sge_num; i++) {
146 set_data_seg_v2(dseg++, sg + fi_sge_num + i);
150 for (i = 0; i < extend_sge_num; i++) {
151 set_data_seg_v2(dseg++, sg + i);
157 static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
158 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
159 void *wqe, unsigned int *sge_ind,
160 const struct ib_send_wr **bad_wr)
162 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
163 struct hns_roce_v2_wqe_data_seg *dseg = wqe;
164 struct hns_roce_qp *qp = to_hr_qp(ibqp);
167 if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
168 if (le32_to_cpu(rc_sq_wqe->msg_len) >
169 hr_dev->caps.max_sq_inline) {
171 dev_err(hr_dev->dev, "inline data len %u exceeds limit %u\n",
172 le32_to_cpu(rc_sq_wqe->msg_len), hr_dev->caps.max_sq_inline);
176 if (wr->opcode == IB_WR_RDMA_READ) {
178 dev_err(hr_dev->dev, "RDMA READ does not support inline data!\n");
182 for (i = 0; i < wr->num_sge; i++) {
183 memcpy(wqe, ((void *)wr->sg_list[i].addr),
184 wr->sg_list[i].length);
185 wqe += wr->sg_list[i].length;
188 roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
191 if (wr->num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
192 for (i = 0; i < wr->num_sge; i++) {
193 if (likely(wr->sg_list[i].length)) {
194 set_data_seg_v2(dseg, wr->sg_list + i);
199 roce_set_field(rc_sq_wqe->byte_20,
200 V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
201 V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
202 (*sge_ind) & (qp->sge.sge_cnt - 1));
204 for (i = 0; i < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
205 if (likely(wr->sg_list[i].length)) {
206 set_data_seg_v2(dseg, wr->sg_list + i);
211 set_extend_sge(qp, wr, sge_ind);
214 roce_set_field(rc_sq_wqe->byte_16,
215 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
216 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
222 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
223 const struct ib_qp_attr *attr,
224 int attr_mask, enum ib_qp_state cur_state,
225 enum ib_qp_state new_state);
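/* Post a list of send work requests to the SQ. UD(GSI) and RC QPs use
 * different WQE layouts, so each request is built according to the QP
 * type before the SQ doorbell is rung; if the QP is in the error
 * state, hns_roce_v2_modify_qp() is invoked with IB_QPS_ERR afterwards.
 */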
227 static int hns_roce_v2_post_send(struct ib_qp *ibqp,
228 const struct ib_send_wr *wr,
229 const struct ib_send_wr **bad_wr)
231 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
232 struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
233 struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
234 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
235 struct hns_roce_qp *qp = to_hr_qp(ibqp);
236 struct hns_roce_wqe_frmr_seg *fseg;
237 struct device *dev = hr_dev->dev;
238 struct hns_roce_v2_db sq_db;
239 struct ib_qp_attr attr;
240 unsigned int sge_ind = 0;
241 unsigned int owner_bit;
254 if (unlikely(ibqp->qp_type != IB_QPT_RC &&
255 ibqp->qp_type != IB_QPT_GSI &&
256 ibqp->qp_type != IB_QPT_UD)) {
257 dev_err(dev, "QP type 0x%x is not supported!\n", ibqp->qp_type);
262 if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
263 qp->state == IB_QPS_RTR)) {
264 dev_err(dev, "Failed to post WQE, QP is in state %d!\n", qp->state);
269 spin_lock_irqsave(&qp->sq.lock, flags);
270 ind = qp->sq_next_wqe;
271 sge_ind = qp->next_sge;
273 for (nreq = 0; wr; ++nreq, wr = wr->next) {
274 if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
280 if (unlikely(wr->num_sge > qp->sq.max_gs)) {
281 dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
282 wr->num_sge, qp->sq.max_gs);
288 wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
289 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
293 ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
296 /* Process the WQE separately according to the QP type */
297 if (ibqp->qp_type == IB_QPT_GSI) {
299 memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
301 roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
302 V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
303 roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
304 V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
305 roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
306 V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
307 roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
308 V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
309 roce_set_field(ud_sq_wqe->byte_48,
310 V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
311 V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
313 roce_set_field(ud_sq_wqe->byte_48,
314 V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
315 V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
319 smac = (u8 *)hr_dev->dev_addr[qp->port];
320 loopback = ether_addr_equal_unaligned(ah->av.mac,
323 roce_set_bit(ud_sq_wqe->byte_40,
324 V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);
326 roce_set_field(ud_sq_wqe->byte_4,
327 V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
328 V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
329 HNS_ROCE_V2_WQE_OP_SEND);
331 for (i = 0; i < wr->num_sge; i++)
332 tmp_len += wr->sg_list[i].length;
335 cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);
337 switch (wr->opcode) {
338 case IB_WR_SEND_WITH_IMM:
339 case IB_WR_RDMA_WRITE_WITH_IMM:
340 ud_sq_wqe->immtdata =
341 cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
344 ud_sq_wqe->immtdata = 0;
349 roce_set_bit(ud_sq_wqe->byte_4,
350 V2_UD_SEND_WQE_BYTE_4_CQE_S,
351 (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
354 roce_set_bit(ud_sq_wqe->byte_4,
355 V2_UD_SEND_WQE_BYTE_4_SE_S,
356 (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
358 roce_set_bit(ud_sq_wqe->byte_4,
359 V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
361 roce_set_field(ud_sq_wqe->byte_16,
362 V2_UD_SEND_WQE_BYTE_16_PD_M,
363 V2_UD_SEND_WQE_BYTE_16_PD_S,
364 to_hr_pd(ibqp->pd)->pdn);
366 roce_set_field(ud_sq_wqe->byte_16,
367 V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
368 V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
371 roce_set_field(ud_sq_wqe->byte_20,
372 V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
373 V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
374 sge_ind & (qp->sge.sge_cnt - 1));
376 roce_set_field(ud_sq_wqe->byte_24,
377 V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
378 V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
380 cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
381 qp->qkey : ud_wr(wr)->remote_qkey);
382 roce_set_field(ud_sq_wqe->byte_32,
383 V2_UD_SEND_WQE_BYTE_32_DQPN_M,
384 V2_UD_SEND_WQE_BYTE_32_DQPN_S,
385 ud_wr(wr)->remote_qpn);
387 roce_set_field(ud_sq_wqe->byte_36,
388 V2_UD_SEND_WQE_BYTE_36_VLAN_M,
389 V2_UD_SEND_WQE_BYTE_36_VLAN_S,
390 le16_to_cpu(ah->av.vlan));
391 roce_set_field(ud_sq_wqe->byte_36,
392 V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
393 V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
395 roce_set_field(ud_sq_wqe->byte_36,
396 V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
397 V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
398 ah->av.sl_tclass_flowlabel >>
399 HNS_ROCE_TCLASS_SHIFT);
400 roce_set_field(ud_sq_wqe->byte_40,
401 V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
402 V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S,
403 ah->av.sl_tclass_flowlabel &
404 HNS_ROCE_FLOW_LABEL_MASK);
405 roce_set_field(ud_sq_wqe->byte_40,
406 V2_UD_SEND_WQE_BYTE_40_SL_M,
407 V2_UD_SEND_WQE_BYTE_40_SL_S,
408 le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
410 roce_set_field(ud_sq_wqe->byte_40,
411 V2_UD_SEND_WQE_BYTE_40_PORTN_M,
412 V2_UD_SEND_WQE_BYTE_40_PORTN_S,
415 roce_set_bit(ud_sq_wqe->byte_40,
416 V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
417 ah->av.vlan_en ? 1 : 0);
418 roce_set_field(ud_sq_wqe->byte_48,
419 V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
420 V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
421 hns_get_gid_index(hr_dev, qp->phy_port,
424 memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
427 set_extend_sge(qp, wr, &sge_ind);
429 } else if (ibqp->qp_type == IB_QPT_RC) {
431 memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
432 for (i = 0; i < wr->num_sge; i++)
433 tmp_len += wr->sg_list[i].length;
436 cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);
438 switch (wr->opcode) {
439 case IB_WR_SEND_WITH_IMM:
440 case IB_WR_RDMA_WRITE_WITH_IMM:
441 rc_sq_wqe->immtdata =
442 cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
444 case IB_WR_SEND_WITH_INV:
446 cpu_to_le32(wr->ex.invalidate_rkey);
449 rc_sq_wqe->immtdata = 0;
453 roce_set_bit(rc_sq_wqe->byte_4,
454 V2_RC_SEND_WQE_BYTE_4_FENCE_S,
455 (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
457 roce_set_bit(rc_sq_wqe->byte_4,
458 V2_RC_SEND_WQE_BYTE_4_SE_S,
459 (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
461 roce_set_bit(rc_sq_wqe->byte_4,
462 V2_RC_SEND_WQE_BYTE_4_CQE_S,
463 (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
465 roce_set_bit(rc_sq_wqe->byte_4,
466 V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
468 wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
469 switch (wr->opcode) {
470 case IB_WR_RDMA_READ:
471 hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ;
473 cpu_to_le32(rdma_wr(wr)->rkey);
475 cpu_to_le64(rdma_wr(wr)->remote_addr);
477 case IB_WR_RDMA_WRITE:
478 hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE;
480 cpu_to_le32(rdma_wr(wr)->rkey);
482 cpu_to_le64(rdma_wr(wr)->remote_addr);
484 case IB_WR_RDMA_WRITE_WITH_IMM:
485 hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM;
487 cpu_to_le32(rdma_wr(wr)->rkey);
489 cpu_to_le64(rdma_wr(wr)->remote_addr);
492 hr_op = HNS_ROCE_V2_WQE_OP_SEND;
494 case IB_WR_SEND_WITH_INV:
495 hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV;
497 case IB_WR_SEND_WITH_IMM:
498 hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM;
500 case IB_WR_LOCAL_INV:
501 hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV;
502 roce_set_bit(rc_sq_wqe->byte_4,
503 V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
505 cpu_to_le32(wr->ex.invalidate_rkey);
508 hr_op = HNS_ROCE_V2_WQE_OP_FAST_REG_PMR;
510 set_frmr_seg(rc_sq_wqe, fseg, reg_wr(wr));
512 case IB_WR_ATOMIC_CMP_AND_SWP:
513 hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP;
515 cpu_to_le32(atomic_wr(wr)->rkey);
517 cpu_to_le64(atomic_wr(wr)->remote_addr);
519 case IB_WR_ATOMIC_FETCH_AND_ADD:
520 hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD;
522 cpu_to_le32(atomic_wr(wr)->rkey);
524 cpu_to_le64(atomic_wr(wr)->remote_addr);
526 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
528 HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP;
530 case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
532 HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD;
535 hr_op = HNS_ROCE_V2_WQE_OP_MASK;
539 roce_set_field(rc_sq_wqe->byte_4,
540 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
541 V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op);
543 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
544 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
545 struct hns_roce_v2_wqe_data_seg *dseg;
548 set_data_seg_v2(dseg, wr->sg_list);
549 wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
550 set_atomic_seg(wqe, atomic_wr(wr));
551 roce_set_field(rc_sq_wqe->byte_16,
552 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
553 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
555 } else if (wr->opcode != IB_WR_REG_MR) {
556 ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
557 wqe, &sge_ind, bad_wr);
564 dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
565 spin_unlock_irqrestore(&qp->sq.lock, flags);
580 roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
581 V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
582 roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
583 V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
584 roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
585 V2_DB_PARAMETER_IDX_S,
586 qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
587 roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
588 V2_DB_PARAMETER_SL_S, qp->sl);
590 hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
592 qp->sq_next_wqe = ind;
593 qp->next_sge = sge_ind;
595 if (qp->state == IB_QPS_ERR) {
596 attr_mask = IB_QP_STATE;
597 attr.qp_state = IB_QPS_ERR;
599 ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, attr_mask,
600 qp->state, IB_QPS_ERR);
602 spin_unlock_irqrestore(&qp->sq.lock, flags);
609 spin_unlock_irqrestore(&qp->sq.lock, flags);
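/* Post a list of receive work requests to the RQ. Each scatter list is
 * copied into the receive WQE (terminated with an invalid-lkey entry
 * when fewer than max_gs SGEs are used), the buffer addresses are
 * recorded for RQ inline when that capability is enabled, and the RQ
 * doorbell record is updated with the new head.
 */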
614 static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
615 const struct ib_recv_wr *wr,
616 const struct ib_recv_wr **bad_wr)
618 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
619 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
620 struct hns_roce_v2_wqe_data_seg *dseg;
621 struct hns_roce_rinl_sge *sge_list;
622 struct device *dev = hr_dev->dev;
623 struct ib_qp_attr attr;
632 spin_lock_irqsave(&hr_qp->rq.lock, flags);
633 ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
635 if (hr_qp->state == IB_QPS_RESET) {
636 spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
641 for (nreq = 0; wr; ++nreq, wr = wr->next) {
642 if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
643 hr_qp->ibqp.recv_cq)) {
649 if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
650 dev_err(dev, "rq: num_sge=%d > qp->rq.max_gs=%d\n",
651 wr->num_sge, hr_qp->rq.max_gs);
657 wqe = get_recv_wqe(hr_qp, ind);
658 dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
659 for (i = 0; i < wr->num_sge; i++) {
660 if (!wr->sg_list[i].length)
662 set_data_seg_v2(dseg, wr->sg_list + i);
666 if (i < hr_qp->rq.max_gs) {
667 dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
671 /* RQ supports inline data */
672 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
673 sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
674 hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
676 for (i = 0; i < wr->num_sge; i++) {
678 (void *)(u64)wr->sg_list[i].addr;
679 sge_list[i].len = wr->sg_list[i].length;
683 hr_qp->rq.wrid[ind] = wr->wr_id;
685 ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
690 hr_qp->rq.head += nreq;
694 *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
696 if (hr_qp->state == IB_QPS_ERR) {
697 attr_mask = IB_QP_STATE;
698 attr.qp_state = IB_QPS_ERR;
700 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr,
701 attr_mask, hr_qp->state,
704 spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
710 spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
715 static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
716 unsigned long instance_stage,
717 unsigned long reset_stage)
719 /* When a hardware reset has completed at least once, stop sending
720 * mailbox, CMQ and doorbell operations to the hardware. If we are in
721 * the .init_instance() function, exit with an error. If we are at the
722 * HNAE3_INIT_CLIENT stage of the soft reset process, also exit with an
723 * error so that the HNAE3_INIT_CLIENT related process can roll back the
724 * operation, e.g. by notifying the hardware to free resources; that
725 * process will then exit with an error to tell the NIC driver to
726 * reschedule the soft reset process once again.
728 hr_dev->is_reset = true;
729 hr_dev->dis_db = true;
731 if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
732 instance_stage == HNS_ROCE_STATE_INIT)
733 return CMD_RST_PRC_EBUSY;
735 return CMD_RST_PRC_SUCCESS;
738 static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
739 unsigned long instance_stage,
740 unsigned long reset_stage)
742 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
743 struct hnae3_handle *handle = priv->handle;
744 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
746 /* When a hardware reset is detected, stop sending mailbox, CMQ and
747 * doorbell operations to the hardware. If we are in the
748 * .init_instance() function, exit with an error. If we are at the
749 * HNAE3_INIT_CLIENT stage of the soft reset process, also exit with an
750 * error so that the related process can roll back the operation, e.g.
751 * by notifying the hardware to free resources; that process will then
752 * exit with an error to tell the NIC driver to reschedule the soft reset once
755 hr_dev->dis_db = true;
756 if (!ops->get_hw_reset_stat(handle))
757 hr_dev->is_reset = true;
759 if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
760 instance_stage == HNS_ROCE_STATE_INIT)
761 return CMD_RST_PRC_EBUSY;
763 return CMD_RST_PRC_SUCCESS;
766 static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
768 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
769 struct hnae3_handle *handle = priv->handle;
770 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
772 /* When a software reset is detected in the .init_instance() function,
773 * stop sending mailbox, CMQ and doorbell operations to hardware, and exit
776 hr_dev->dis_db = true;
777 if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
778 hr_dev->is_reset = true;
780 return CMD_RST_PRC_EBUSY;
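/* Decide how a command should be handled while a reset may be in
 * progress: route to the hw_reseted/hw_resetting/sw_resetting helpers
 * above according to the completed-reset count, the hardware reset
 * status and the NIC software reset status reported by the hnae3
 * framework.
 */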
783 static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
785 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
786 struct hnae3_handle *handle = priv->handle;
787 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
788 unsigned long instance_stage; /* the current instance stage */
789 unsigned long reset_stage; /* the current reset stage */
790 unsigned long reset_cnt;
794 if (hr_dev->is_reset)
795 return CMD_RST_PRC_SUCCESS;
797 /* Get information about reset from NIC driver or RoCE driver itself,
798 * the meaning of the following variables from the NIC driver is described
800 * reset_cnt -- The count value of completed hardware reset.
801 * hw_resetting -- Whether hardware device is resetting now.
802 * sw_resetting -- Whether NIC's software reset process is running now.
804 instance_stage = handle->rinfo.instance_state;
805 reset_stage = handle->rinfo.reset_state;
806 reset_cnt = ops->ae_dev_reset_cnt(handle);
807 hw_resetting = ops->get_hw_reset_stat(handle);
808 sw_resetting = ops->ae_dev_resetting(handle);
810 if (reset_cnt != hr_dev->reset_cnt)
811 return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
813 else if (hw_resetting)
814 return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
816 else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
817 return hns_roce_v2_cmd_sw_resetting(hr_dev);
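/* Number of free descriptors in a command queue ring. One slot is kept
 * unused so that a full ring can be told apart from an empty one, e.g.
 * with desc_num = 1024, next_to_use = 10 and next_to_clean = 6, four
 * descriptors are in flight and 1019 may still be posted.
 */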
822 static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
824 int ntu = ring->next_to_use;
825 int ntc = ring->next_to_clean;
826 int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
828 return ring->desc_num - used - 1;
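/* Allocate the descriptor array for a command queue ring and map it
 * for DMA; the mapping is torn down again in hns_roce_free_cmq_desc().
 */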
831 static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
832 struct hns_roce_v2_cmq_ring *ring)
834 int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
836 ring->desc = kzalloc(size, GFP_KERNEL);
840 ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
842 if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
843 ring->desc_dma_addr = 0;
852 static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
853 struct hns_roce_v2_cmq_ring *ring)
855 dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
856 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
859 ring->desc_dma_addr = 0;
863 static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
865 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
866 struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
867 &priv->cmq.csq : &priv->cmq.crq;
869 ring->flag = ring_type;
870 ring->next_to_clean = 0;
871 ring->next_to_use = 0;
873 return hns_roce_alloc_cmq_desc(hr_dev, ring);
876 static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
878 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
879 struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
880 &priv->cmq.csq : &priv->cmq.crq;
881 dma_addr_t dma = ring->desc_dma_addr;
883 if (ring_type == TYPE_CSQ) {
884 roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
885 roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
887 roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
888 (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
889 HNS_ROCE_CMQ_ENABLE);
890 roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
891 roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
893 roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
894 roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
896 roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
897 (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
898 HNS_ROCE_CMQ_ENABLE);
899 roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
900 roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
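/* Initialize the command queues: set the CSQ/CRQ sizes and locks,
 * allocate both rings and program their base address, depth and
 * head/tail registers.
 */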
904 static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
906 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
909 /* Set up the queue entries for the command queues */
910 priv->cmq.csq.desc_num = CMD_CSQ_DESC_NUM;
911 priv->cmq.crq.desc_num = CMD_CRQ_DESC_NUM;
913 /* Set up the locks for the command queues */
914 spin_lock_init(&priv->cmq.csq.lock);
915 spin_lock_init(&priv->cmq.crq.lock);
917 /* Set up the Tx write-back timeout */
918 priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;
921 ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
923 dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
928 ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
930 dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
935 hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);
938 hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);
943 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
948 static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
950 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
952 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
953 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
956 static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
957 enum hns_roce_opcode_type opcode,
960 memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
961 desc->opcode = cpu_to_le16(opcode);
963 cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
965 desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
967 desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
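/* The CSQ is done when the hardware head pointer has caught up with
 * the driver's next_to_use index.
 */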
970 static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
972 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
973 u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
975 return head == priv->cmq.csq.next_to_use;
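/* Reclaim CSQ descriptors that the hardware has consumed: walk from
 * next_to_clean up to the hardware head pointer, clearing each
 * descriptor, and return how many were cleaned.
 */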
978 static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
980 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
981 struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
982 struct hns_roce_cmq_desc *desc;
983 u16 ntc = csq->next_to_clean;
987 desc = &csq->desc[ntc];
988 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
989 while (head != ntc) {
990 memset(desc, 0, sizeof(*desc));
992 if (ntc == csq->desc_num)
994 desc = &csq->desc[ntc];
997 csq->next_to_clean = ntc;
1002 static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
1003 struct hns_roce_cmq_desc *desc, int num)
1005 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
1006 struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
1007 struct hns_roce_cmq_desc *desc_to_use;
1008 bool complete = false;
1015 spin_lock_bh(&csq->lock);
1017 if (num > hns_roce_cmq_space(csq)) {
1018 spin_unlock_bh(&csq->lock);
1023 * Record the location of desc in the cmq for this time
1024 * which will be used when hardware writes back
1026 ntc = csq->next_to_use;
1028 while (handle < num) {
1029 desc_to_use = &csq->desc[csq->next_to_use];
1030 *desc_to_use = desc[handle];
1031 dev_dbg(hr_dev->dev, "set cmq desc:\n");
1033 if (csq->next_to_use == csq->desc_num)
1034 csq->next_to_use = 0;
1038 /* Write to hardware */
1039 roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);
1042 * If the command is synchronous, wait for the firmware to write back;
1043 * if multiple descriptors are sent, use the first one to check
1045 if ((desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
1047 if (hns_roce_cmq_csq_done(hr_dev))
1051 } while (timeout < priv->cmq.tx_timeout);
1054 if (hns_roce_cmq_csq_done(hr_dev)) {
1057 while (handle < num) {
1058 /* get the result of hardware write back */
1059 desc_to_use = &csq->desc[ntc];
1060 desc[handle] = *desc_to_use;
1061 dev_dbg(hr_dev->dev, "Get cmq desc:\n");
1062 desc_ret = desc[handle].retval;
1063 if (desc_ret == CMD_EXEC_SUCCESS)
1067 priv->cmq.last_status = desc_ret;
1070 if (ntc == csq->desc_num)
1078 /* clean the command send queue */
1079 handle = hns_roce_cmq_csq_clean(hr_dev);
1081 dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
1084 spin_unlock_bh(&csq->lock);
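/* Reset-aware wrapper around __hns_roce_cmq_send(): commands issued
 * while the device is being reset are completed as success or -EBUSY
 * according to hns_roce_v2_rst_process_cmd(), both before the send and
 * when the send itself fails.
 */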
1089 int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
1090 struct hns_roce_cmq_desc *desc, int num)
1095 ret = hns_roce_v2_rst_process_cmd(hr_dev);
1096 if (ret == CMD_RST_PRC_SUCCESS)
1098 if (ret == CMD_RST_PRC_EBUSY)
1101 ret = __hns_roce_cmq_send(hr_dev, desc, num);
1103 retval = hns_roce_v2_rst_process_cmd(hr_dev);
1104 if (retval == CMD_RST_PRC_SUCCESS)
1106 else if (retval == CMD_RST_PRC_EBUSY)
1113 static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
1115 struct hns_roce_query_version *resp;
1116 struct hns_roce_cmq_desc desc;
1119 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
1120 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1124 resp = (struct hns_roce_query_version *)desc.data;
1125 hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
1126 hr_dev->vendor_id = hr_dev->pci_dev->vendor;
1131 static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
1133 struct hns_roce_query_fw_info *resp;
1134 struct hns_roce_cmq_desc desc;
1137 hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
1138 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1142 resp = (struct hns_roce_query_fw_info *)desc.data;
1143 hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));
1148 static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
1150 struct hns_roce_cfg_global_param *req;
1151 struct hns_roce_cmq_desc desc;
1153 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
1156 req = (struct hns_roce_cfg_global_param *)desc.data;
1157 memset(req, 0, sizeof(*req));
1158 roce_set_field(req->time_cfg_udp_port,
1159 CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
1160 CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
1161 roce_set_field(req->time_cfg_udp_port,
1162 CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
1163 CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);
1165 return hns_roce_cmq_send(hr_dev, &desc, 1);
1168 static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
1170 struct hns_roce_cmq_desc desc[2];
1171 struct hns_roce_pf_res_a *req_a;
1172 struct hns_roce_pf_res_b *req_b;
1176 for (i = 0; i < 2; i++) {
1177 hns_roce_cmq_setup_basic_desc(&desc[i],
1178 HNS_ROCE_OPC_QUERY_PF_RES, true);
1181 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1183 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1186 ret = hns_roce_cmq_send(hr_dev, desc, 2);
1190 req_a = (struct hns_roce_pf_res_a *)desc[0].data;
1191 req_b = (struct hns_roce_pf_res_b *)desc[1].data;
1193 hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
1194 PF_RES_DATA_1_PF_QPC_BT_NUM_M,
1195 PF_RES_DATA_1_PF_QPC_BT_NUM_S);
1196 hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
1197 PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
1198 PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
1199 hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
1200 PF_RES_DATA_3_PF_CQC_BT_NUM_M,
1201 PF_RES_DATA_3_PF_CQC_BT_NUM_S);
1202 hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
1203 PF_RES_DATA_4_PF_MPT_BT_NUM_M,
1204 PF_RES_DATA_4_PF_MPT_BT_NUM_S);
1206 hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
1207 PF_RES_DATA_3_PF_SL_NUM_M,
1208 PF_RES_DATA_3_PF_SL_NUM_S);
1209 hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num,
1210 PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
1211 PF_RES_DATA_4_PF_SCCC_BT_NUM_S);
1216 static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
1218 struct hns_roce_pf_timer_res_a *req_a;
1219 struct hns_roce_cmq_desc desc[2];
1222 for (i = 0; i < 2; i++) {
1223 hns_roce_cmq_setup_basic_desc(&desc[i],
1224 HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
1228 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1230 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1233 ret = hns_roce_cmq_send(hr_dev, desc, 2);
1237 req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data;
1239 hr_dev->caps.qpc_timer_bt_num =
1240 roce_get_field(req_a->qpc_timer_bt_idx_num,
1241 PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
1242 PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
1243 hr_dev->caps.cqc_timer_bt_num =
1244 roce_get_field(req_a->cqc_timer_bt_idx_num,
1245 PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
1246 PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);
1251 static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
1254 struct hns_roce_cmq_desc desc;
1255 struct hns_roce_vf_switch *swt;
1258 swt = (struct hns_roce_vf_switch *)desc.data;
1259 hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
1260 swt->rocee_sel |= cpu_to_le16(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
1261 roce_set_field(swt->fun_id,
1262 VF_SWITCH_DATA_FUN_ID_VF_ID_M,
1263 VF_SWITCH_DATA_FUN_ID_VF_ID_S,
1265 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1269 cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
1270 desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
1271 roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
1272 roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 1);
1273 roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);
1275 return hns_roce_cmq_send(hr_dev, &desc, 1);
1278 static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
1280 struct hns_roce_cmq_desc desc[2];
1281 struct hns_roce_vf_res_a *req_a;
1282 struct hns_roce_vf_res_b *req_b;
1285 req_a = (struct hns_roce_vf_res_a *)desc[0].data;
1286 req_b = (struct hns_roce_vf_res_b *)desc[1].data;
1287 memset(req_a, 0, sizeof(*req_a));
1288 memset(req_b, 0, sizeof(*req_b));
1289 for (i = 0; i < 2; i++) {
1290 hns_roce_cmq_setup_basic_desc(&desc[i],
1291 HNS_ROCE_OPC_ALLOC_VF_RES, false);
1294 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1296 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1299 roce_set_field(req_a->vf_qpc_bt_idx_num,
1300 VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
1301 VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
1302 roce_set_field(req_a->vf_qpc_bt_idx_num,
1303 VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
1304 VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
1305 HNS_ROCE_VF_QPC_BT_NUM);
1307 roce_set_field(req_a->vf_srqc_bt_idx_num,
1308 VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
1309 VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
1310 roce_set_field(req_a->vf_srqc_bt_idx_num,
1311 VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
1312 VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
1313 HNS_ROCE_VF_SRQC_BT_NUM);
1315 roce_set_field(req_a->vf_cqc_bt_idx_num,
1316 VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
1317 VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
1318 roce_set_field(req_a->vf_cqc_bt_idx_num,
1319 VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
1320 VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
1321 HNS_ROCE_VF_CQC_BT_NUM);
1323 roce_set_field(req_a->vf_mpt_bt_idx_num,
1324 VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
1325 VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
1326 roce_set_field(req_a->vf_mpt_bt_idx_num,
1327 VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
1328 VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
1329 HNS_ROCE_VF_MPT_BT_NUM);
1331 roce_set_field(req_a->vf_eqc_bt_idx_num,
1332 VF_RES_A_DATA_5_VF_EQC_IDX_M,
1333 VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
1334 roce_set_field(req_a->vf_eqc_bt_idx_num,
1335 VF_RES_A_DATA_5_VF_EQC_NUM_M,
1336 VF_RES_A_DATA_5_VF_EQC_NUM_S,
1337 HNS_ROCE_VF_EQC_NUM);
1339 roce_set_field(req_b->vf_smac_idx_num,
1340 VF_RES_B_DATA_1_VF_SMAC_IDX_M,
1341 VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
1342 roce_set_field(req_b->vf_smac_idx_num,
1343 VF_RES_B_DATA_1_VF_SMAC_NUM_M,
1344 VF_RES_B_DATA_1_VF_SMAC_NUM_S,
1345 HNS_ROCE_VF_SMAC_NUM);
1347 roce_set_field(req_b->vf_sgid_idx_num,
1348 VF_RES_B_DATA_2_VF_SGID_IDX_M,
1349 VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
1350 roce_set_field(req_b->vf_sgid_idx_num,
1351 VF_RES_B_DATA_2_VF_SGID_NUM_M,
1352 VF_RES_B_DATA_2_VF_SGID_NUM_S,
1353 HNS_ROCE_VF_SGID_NUM);
1355 roce_set_field(req_b->vf_qid_idx_sl_num,
1356 VF_RES_B_DATA_3_VF_QID_IDX_M,
1357 VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
1358 roce_set_field(req_b->vf_qid_idx_sl_num,
1359 VF_RES_B_DATA_3_VF_SL_NUM_M,
1360 VF_RES_B_DATA_3_VF_SL_NUM_S,
1361 HNS_ROCE_VF_SL_NUM);
1363 roce_set_field(req_b->vf_sccc_idx_num,
1364 VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
1365 VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
1366 roce_set_field(req_b->vf_sccc_idx_num,
1367 VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
1368 VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
1369 HNS_ROCE_VF_SCCC_BT_NUM);
1373 return hns_roce_cmq_send(hr_dev, desc, 2);
1376 static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
1378 u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
1379 u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
1380 u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
1381 u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
1382 u8 sccc_hop_num = hr_dev->caps.sccc_hop_num;
1383 struct hns_roce_cfg_bt_attr *req;
1384 struct hns_roce_cmq_desc desc;
1386 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
1387 req = (struct hns_roce_cfg_bt_attr *)desc.data;
1388 memset(req, 0, sizeof(*req));
1390 roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
1391 CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
1392 hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
1393 roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
1394 CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
1395 hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
1396 roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
1397 CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
1398 qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);
1400 roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
1401 CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
1402 hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
1403 roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
1404 CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
1405 hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
1406 roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
1407 CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
1408 srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);
1410 roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
1411 CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
1412 hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
1413 roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
1414 CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
1415 hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
1416 roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
1417 CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
1418 cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);
1420 roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
1421 CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
1422 hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
1423 roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
1424 CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
1425 hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
1426 roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
1427 CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
1428 mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
1430 roce_set_field(req->vf_sccc_cfg,
1431 CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M,
1432 CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S,
1433 hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET);
1434 roce_set_field(req->vf_sccc_cfg,
1435 CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M,
1436 CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S,
1437 hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET);
1438 roce_set_field(req->vf_sccc_cfg,
1439 CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M,
1440 CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S,
1442 HNS_ROCE_HOP_NUM_0 ? 0 : sccc_hop_num);
1444 return hns_roce_cmq_send(hr_dev, &desc, 1);
1447 static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
1449 struct hns_roce_caps *caps = &hr_dev->caps;
1452 ret = hns_roce_cmq_query_hw_info(hr_dev);
1454 dev_err(hr_dev->dev, "Query hardware version fail, ret = %d.\n",
1459 ret = hns_roce_query_fw_ver(hr_dev);
1461 dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
1466 ret = hns_roce_config_global_param(hr_dev);
1468 dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
1473 /* Get the resources owned by each pf */
1474 ret = hns_roce_query_pf_resource(hr_dev);
1476 dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
1481 if (hr_dev->pci_dev->revision == 0x21) {
1482 ret = hns_roce_query_pf_timer_resource(hr_dev);
1484 dev_err(hr_dev->dev,
1485 "Query pf timer resource fail, ret = %d.\n",
1491 ret = hns_roce_alloc_vf_resource(hr_dev);
1493 dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
1498 if (hr_dev->pci_dev->revision == 0x21) {
1499 ret = hns_roce_set_vf_switch_param(hr_dev, 0);
1501 dev_err(hr_dev->dev,
1502 "Set function switch param fail, ret = %d.\n",
1508 hr_dev->vendor_part_id = hr_dev->pci_dev->device;
1509 hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
1511 caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
1512 caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
1513 caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
1514 caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM;
1515 caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
1516 caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
1517 caps->max_srqwqes = HNS_ROCE_V2_MAX_SRQWQE_NUM;
1518 caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
1519 caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
1520 caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
1521 caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
1522 caps->max_srq_sg = HNS_ROCE_V2_MAX_SRQ_SGE_NUM;
1523 caps->num_uars = HNS_ROCE_V2_UAR_NUM;
1524 caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
1525 caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
1526 caps->num_comp_vectors = HNS_ROCE_V2_COMP_VEC_NUM;
1527 caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
1528 caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
1529 caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
1530 caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
1531 caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
1532 caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
1533 caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
1534 caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
1535 caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
1536 caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
1537 caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
1538 caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
1539 caps->qpc_entry_sz = HNS_ROCE_V2_QPC_ENTRY_SZ;
1540 caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
1541 caps->trrl_entry_sz = HNS_ROCE_V2_TRRL_ENTRY_SZ;
1542 caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
1543 caps->srqc_entry_sz = HNS_ROCE_V2_SRQC_ENTRY_SZ;
1544 caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
1545 caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
1546 caps->idx_entry_sz = 4;
1547 caps->cq_entry_sz = HNS_ROCE_V2_CQE_ENTRY_SIZE;
1548 caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
1549 caps->reserved_lkey = 0;
1550 caps->reserved_pds = 0;
1551 caps->reserved_mrws = 1;
1552 caps->reserved_uars = 0;
1553 caps->reserved_cqs = 0;
1554 caps->reserved_srqs = 0;
1555 caps->reserved_qps = HNS_ROCE_V2_RSV_QPS;
1557 caps->qpc_ba_pg_sz = 0;
1558 caps->qpc_buf_pg_sz = 0;
1559 caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1560 caps->srqc_ba_pg_sz = 0;
1561 caps->srqc_buf_pg_sz = 0;
1562 caps->srqc_hop_num = HNS_ROCE_HOP_NUM_0;
1563 caps->cqc_ba_pg_sz = 0;
1564 caps->cqc_buf_pg_sz = 0;
1565 caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1566 caps->mpt_ba_pg_sz = 0;
1567 caps->mpt_buf_pg_sz = 0;
1568 caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1569 caps->pbl_ba_pg_sz = 2;
1570 caps->pbl_buf_pg_sz = 0;
1571 caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
1572 caps->mtt_ba_pg_sz = 0;
1573 caps->mtt_buf_pg_sz = 0;
1574 caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
1575 caps->cqe_ba_pg_sz = 0;
1576 caps->cqe_buf_pg_sz = 0;
1577 caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
1578 caps->srqwqe_ba_pg_sz = 0;
1579 caps->srqwqe_buf_pg_sz = 0;
1580 caps->srqwqe_hop_num = HNS_ROCE_SRQWQE_HOP_NUM;
1581 caps->idx_ba_pg_sz = 0;
1582 caps->idx_buf_pg_sz = 0;
1583 caps->idx_hop_num = HNS_ROCE_IDX_HOP_NUM;
1584 caps->eqe_ba_pg_sz = 0;
1585 caps->eqe_buf_pg_sz = 0;
1586 caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
1587 caps->tsq_buf_pg_sz = 0;
1588 caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
1590 caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
1591 HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
1592 HNS_ROCE_CAP_FLAG_RQ_INLINE |
1593 HNS_ROCE_CAP_FLAG_RECORD_DB |
1594 HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
1596 if (hr_dev->pci_dev->revision == 0x21)
1597 caps->flags |= HNS_ROCE_CAP_FLAG_MW |
1598 HNS_ROCE_CAP_FLAG_FRMR;
1600 caps->pkey_table_len[0] = 1;
1601 caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
1602 caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
1603 caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
1604 caps->local_ca_ack_delay = 0;
1605 caps->max_mtu = IB_MTU_4096;
1607 caps->max_srqs = HNS_ROCE_V2_MAX_SRQ;
1608 caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR;
1609 caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE;
1611 if (hr_dev->pci_dev->revision == 0x21) {
1612 caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC |
1613 HNS_ROCE_CAP_FLAG_SRQ |
1614 HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
1616 caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
1617 caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
1618 caps->qpc_timer_ba_pg_sz = 0;
1619 caps->qpc_timer_buf_pg_sz = 0;
1620 caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
1621 caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
1622 caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
1623 caps->cqc_timer_ba_pg_sz = 0;
1624 caps->cqc_timer_buf_pg_sz = 0;
1625 caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
1627 caps->sccc_entry_sz = HNS_ROCE_V2_SCCC_ENTRY_SZ;
1628 caps->sccc_ba_pg_sz = 0;
1629 caps->sccc_buf_pg_sz = 0;
1630 caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;
1633 ret = hns_roce_v2_set_bt(hr_dev);
1635 dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
1641 static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
1642 enum hns_roce_link_table_type type)
1644 struct hns_roce_cmq_desc desc[2];
1645 struct hns_roce_cfg_llm_a *req_a =
1646 (struct hns_roce_cfg_llm_a *)desc[0].data;
1647 struct hns_roce_cfg_llm_b *req_b =
1648 (struct hns_roce_cfg_llm_b *)desc[1].data;
1649 struct hns_roce_v2_priv *priv = hr_dev->priv;
1650 struct hns_roce_link_table *link_tbl;
1651 struct hns_roce_link_table_entry *entry;
1652 enum hns_roce_opcode_type opcode;
1657 case TSQ_LINK_TABLE:
1658 link_tbl = &priv->tsq;
1659 opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
1661 case TPQ_LINK_TABLE:
1662 link_tbl = &priv->tpq;
1663 opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
1669 page_num = link_tbl->npages;
1670 entry = link_tbl->table.buf;
1671 memset(req_a, 0, sizeof(*req_a));
1672 memset(req_b, 0, sizeof(*req_b));
1674 for (i = 0; i < 2; i++) {
1675 hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);
1678 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1680 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1683 req_a->base_addr_l = link_tbl->table.map & 0xffffffff;
1684 req_a->base_addr_h = (link_tbl->table.map >> 32) &
1686 roce_set_field(req_a->depth_pgsz_init_en,
1687 CFG_LLM_QUE_DEPTH_M,
1688 CFG_LLM_QUE_DEPTH_S,
1690 roce_set_field(req_a->depth_pgsz_init_en,
1694 req_a->head_ba_l = entry[0].blk_ba0;
1695 req_a->head_ba_h_nxtptr = entry[0].blk_ba1_nxt_ptr;
1696 roce_set_field(req_a->head_ptr,
1698 CFG_LLM_HEAD_PTR_S, 0);
1700 req_b->tail_ba_l = entry[page_num - 1].blk_ba0;
1701 roce_set_field(req_b->tail_ba_h,
1702 CFG_LLM_TAIL_BA_H_M,
1703 CFG_LLM_TAIL_BA_H_S,
1704 entry[page_num - 1].blk_ba1_nxt_ptr &
1705 HNS_ROCE_LINK_TABLE_BA1_M);
1706 roce_set_field(req_b->tail_ptr,
1709 (entry[page_num - 2].blk_ba1_nxt_ptr &
1710 HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
1711 HNS_ROCE_LINK_TABLE_NXT_PTR_S);
1714 roce_set_field(req_a->depth_pgsz_init_en,
1715 CFG_LLM_INIT_EN_M, CFG_LLM_INIT_EN_S, 1);
1717 return hns_roce_cmq_send(hr_dev, desc, 2);
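/* Allocate a TSQ or TPQ link table: a coherent array of link table
 * entries plus one DMA buffer per page, where each entry records the
 * block base address of its page and a pointer to the next page. The
 * finished table is handed to hardware by hns_roce_config_link_table()
 * above, which sends it in a two-descriptor CFG_LLM command.
 */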
1720 static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
1721 enum hns_roce_link_table_type type)
1723 struct hns_roce_v2_priv *priv = hr_dev->priv;
1724 struct hns_roce_link_table *link_tbl;
1725 struct hns_roce_link_table_entry *entry;
1726 struct device *dev = hr_dev->dev;
1737 case TSQ_LINK_TABLE:
1738 link_tbl = &priv->tsq;
1739 buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
1740 pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
1741 pg_num_b = hr_dev->caps.sl_num * 4 + 2;
1743 case TPQ_LINK_TABLE:
1744 link_tbl = &priv->tpq;
1745 buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT);
1746 pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
1747 pg_num_b = 2 * 4 * func_num + 2;
1753 pg_num = max(pg_num_a, pg_num_b);
1754 size = pg_num * sizeof(struct hns_roce_link_table_entry);
1756 link_tbl->table.buf = dma_alloc_coherent(dev, size,
1757 &link_tbl->table.map,
1759 if (!link_tbl->table.buf)
1762 link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
1764 if (!link_tbl->pg_list)
1765 goto err_kcalloc_failed;
1767 entry = link_tbl->table.buf;
1768 for (i = 0; i < pg_num; ++i) {
1769 link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
1771 if (!link_tbl->pg_list[i].buf)
1772 goto err_alloc_buf_failed;
1774 link_tbl->pg_list[i].map = t;
1775 memset(link_tbl->pg_list[i].buf, 0, buf_chk_sz);
1777 entry[i].blk_ba0 = (t >> 12) & 0xffffffff;
1778 roce_set_field(entry[i].blk_ba1_nxt_ptr,
1779 HNS_ROCE_LINK_TABLE_BA1_M,
1780 HNS_ROCE_LINK_TABLE_BA1_S,
1783 if (i < (pg_num - 1))
1784 roce_set_field(entry[i].blk_ba1_nxt_ptr,
1785 HNS_ROCE_LINK_TABLE_NXT_PTR_M,
1786 HNS_ROCE_LINK_TABLE_NXT_PTR_S,
1789 link_tbl->npages = pg_num;
1790 link_tbl->pg_sz = buf_chk_sz;
1792 return hns_roce_config_link_table(hr_dev, type);
1794 err_alloc_buf_failed:
1795 for (i -= 1; i >= 0; i--)
1796 dma_free_coherent(dev, buf_chk_sz,
1797 link_tbl->pg_list[i].buf,
1798 link_tbl->pg_list[i].map);
1799 kfree(link_tbl->pg_list);
1802 dma_free_coherent(dev, size, link_tbl->table.buf,
1803 link_tbl->table.map);
1809 static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
1810 struct hns_roce_link_table *link_tbl)
1812 struct device *dev = hr_dev->dev;
1816 size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);
1818 for (i = 0; i < link_tbl->npages; ++i)
1819 if (link_tbl->pg_list[i].buf)
1820 dma_free_coherent(dev, link_tbl->pg_sz,
1821 link_tbl->pg_list[i].buf,
1822 link_tbl->pg_list[i].map);
1823 kfree(link_tbl->pg_list);
1825 dma_free_coherent(dev, size, link_tbl->table.buf,
1826 link_tbl->table.map);
1829 static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
1831 struct hns_roce_v2_priv *priv = hr_dev->priv;
1832 int qpc_count, cqc_count;
1835 /* TSQ includes SQ doorbell and ack doorbell */
1836 ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
1838 dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret);
1842 ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
1844 dev_err(hr_dev->dev, "TPQ init failed, ret = %d.\n", ret);
1845 goto err_tpq_init_failed;
1848 /* Alloc memory for QPC Timer buffer space chunk */
1849 for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
1851 ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
1854 dev_err(hr_dev->dev, "QPC Timer get failed\n");
1855 goto err_qpc_timer_failed;
1859 /* Alloc memory for CQC Timer buffer space chunk */
1860 for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
1862 ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
1865 dev_err(hr_dev->dev, "CQC Timer get failed\n");
1866 goto err_cqc_timer_failed;
1872 err_cqc_timer_failed:
1873 for (i = 0; i < cqc_count; i++)
1874 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
1876 err_qpc_timer_failed:
1877 for (i = 0; i < qpc_count; i++)
1878 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
1880 hns_roce_free_link_table(hr_dev, &priv->tpq);
1882 err_tpq_init_failed:
1883 hns_roce_free_link_table(hr_dev, &priv->tsq);
1888 static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
1890 struct hns_roce_v2_priv *priv = hr_dev->priv;
1892 hns_roce_free_link_table(hr_dev, &priv->tpq);
1893 hns_roce_free_link_table(hr_dev, &priv->tsq);
1896 static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
1898 struct hns_roce_cmq_desc desc;
1899 struct hns_roce_mbox_status *mb_st =
1900 (struct hns_roce_mbox_status *)desc.data;
1901 enum hns_roce_cmd_return_status status;
1903 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true);
1905 status = hns_roce_cmq_send(hr_dev, &desc, 1);
1909 return le32_to_cpu(mb_st->mb_status_hw_run);
1912 static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
1914 u32 status = hns_roce_query_mbox_status(hr_dev);
1916 return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
1919 static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
1921 u32 status = hns_roce_query_mbox_status(hr_dev);
1923 return status & HNS_ROCE_HW_MB_STATUS_MASK;
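/* Build a POST_MB command descriptor from the mailbox parameters
 * (input/output addresses, modifier, opcode, token and event flag) and
 * send it over the command queue.
 */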
1926 static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
1927 u64 out_param, u32 in_modifier, u8 op_modifier,
1928 u16 op, u16 token, int event)
1930 struct hns_roce_cmq_desc desc;
1931 struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
1933 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
1935 mb->in_param_l = cpu_to_le32(in_param);
1936 mb->in_param_h = cpu_to_le32(in_param >> 32);
1937 mb->out_param_l = cpu_to_le32(out_param);
1938 mb->out_param_h = cpu_to_le32(out_param >> 32);
1939 mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
1940 mb->token_event_en = cpu_to_le32(event << 16 | token);
1942 return hns_roce_cmq_send(hr_dev, &desc, 1);
1945 static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
1946 u64 out_param, u32 in_modifier, u8 op_modifier,
1947 u16 op, u16 token, int event)
1949 struct device *dev = hr_dev->dev;
1953 end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
1954 while (hns_roce_v2_cmd_pending(hr_dev)) {
1955 if (time_after(jiffies, end)) {
1956 dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
1963 ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
1964 op_modifier, op, token, event);
1966 dev_err(dev, "Post mailbox fail(%d)\n", ret);
1971 static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
1972 unsigned long timeout)
1974 struct device *dev = hr_dev->dev;
1975 unsigned long end = 0;
1978 end = msecs_to_jiffies(timeout) + jiffies;
1979 while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
1982 if (hns_roce_v2_cmd_pending(hr_dev)) {
1983 dev_err(dev, "[cmd_poll] hw run cmd TIMEDOUT!\n");
1987 status = hns_roce_v2_cmd_complete(hr_dev);
1988 if (status != 0x1) {
1989 if (status == CMD_RST_PRC_EBUSY)
1992 dev_err(dev, "mailbox status 0x%x!\n", status);
1999 static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
2000 int gid_index, const union ib_gid *gid,
2001 enum hns_roce_sgid_type sgid_type)
2003 struct hns_roce_cmq_desc desc;
2004 struct hns_roce_cfg_sgid_tb *sgid_tb =
2005 (struct hns_roce_cfg_sgid_tb *)desc.data;
2008 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
2010 roce_set_field(sgid_tb->table_idx_rsv,
2011 CFG_SGID_TB_TABLE_IDX_M,
2012 CFG_SGID_TB_TABLE_IDX_S, gid_index);
2013 roce_set_field(sgid_tb->vf_sgid_type_rsv,
2014 CFG_SGID_TB_VF_SGID_TYPE_M,
2015 CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
2017 p = (u32 *)&gid->raw[0];
2018 sgid_tb->vf_sgid_l = cpu_to_le32(*p);
2020 p = (u32 *)&gid->raw[4];
2021 sgid_tb->vf_sgid_ml = cpu_to_le32(*p);
2023 p = (u32 *)&gid->raw[8];
2024 sgid_tb->vf_sgid_mh = cpu_to_le32(*p);
2026 p = (u32 *)&gid->raw[0xc];
2027 sgid_tb->vf_sgid_h = cpu_to_le32(*p);
2029 return hns_roce_cmq_send(hr_dev, &desc, 1);
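/* Install a GID table entry: derive the SGID type (RoCE v1, RoCE v2
 * with an IPv4-mapped address, or RoCE v2 IPv6) from the GID attribute
 * and write the entry through hns_roce_config_sgid_table().
 */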
2032 static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
2033 int gid_index, const union ib_gid *gid,
2034 const struct ib_gid_attr *attr)
2036 enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
2042 if (attr->gid_type == IB_GID_TYPE_ROCE)
2043 sgid_type = GID_TYPE_FLAG_ROCE_V1;
2045 if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
2046 if (ipv6_addr_v4mapped((void *)gid))
2047 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
2049 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
2052 ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
2054 dev_err(hr_dev->dev, "Configure sgid table failed(%d)!\n", ret);
2059 static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
2062 struct hns_roce_cmq_desc desc;
2063 struct hns_roce_cfg_smac_tb *smac_tb =
2064 (struct hns_roce_cfg_smac_tb *)desc.data;
2068 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
2070 reg_smac_l = *(u32 *)(&addr[0]);
2071 reg_smac_h = *(u16 *)(&addr[4]);
2073 memset(smac_tb, 0, sizeof(*smac_tb));
2074 roce_set_field(smac_tb->tb_idx_rsv,
2076 CFG_SMAC_TB_IDX_S, phy_port);
2077 roce_set_field(smac_tb->vf_smac_h_rsv,
2078 CFG_SMAC_TB_VF_SMAC_H_M,
2079 CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
2080 smac_tb->vf_smac_l = reg_smac_l;
2082 return hns_roce_cmq_send(hr_dev, &desc, 1);
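/* Fill the PBL related fields of an MPT entry: record the PBL base
 * address and size, and copy the first page addresses of the MR's umem
 * directly into the PA0/PA1 slots of the entry.
 */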
2085 static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
2086 struct hns_roce_mr *mr)
2088 struct sg_dma_page_iter sg_iter;
2093 mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
2094 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
2095 roce_set_field(mpt_entry->byte_48_mode_ba,
2096 V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
2097 upper_32_bits(mr->pbl_ba >> 3));
2099 pages = (u64 *)__get_free_page(GFP_KERNEL);
2104 for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
2105 page_addr = sg_page_iter_dma_address(&sg_iter);
2106 pages[i] = page_addr >> 6;
2108 /* Record only the first two entries directly in the MTPT table */
2109 if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
2114 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
2115 roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
2116 V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
2118 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
2119 roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
2120 V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
2121 roce_set_field(mpt_entry->byte_64_buf_pa1,
2122 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2123 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2124 mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2126 free_page((unsigned long)pages);
2131 static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
2132 unsigned long mtpt_idx)
2134 struct hns_roce_v2_mpt_entry *mpt_entry;
2138 memset(mpt_entry, 0, sizeof(*mpt_entry));
2140 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2141 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
2142 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2143 V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
2144 HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
2145 roce_set_field(mpt_entry->byte_4_pd_hop_st,
2146 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2147 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2148 mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2149 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2150 V2_MPT_BYTE_4_PD_S, mr->pd);
2152 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
2153 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
2154 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2155 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
2156 (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
2157 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
2158 mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
2159 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
2160 (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
2161 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
2162 (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
2163 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
2164 (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
2166 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
2167 mr->type == MR_TYPE_MR ? 0 : 1);
2168 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
2171 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
2172 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
2173 mpt_entry->lkey = cpu_to_le32(mr->key);
2174 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
2175 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
2177 if (mr->type == MR_TYPE_DMA)
2180 ret = set_mtpt_pbl(mpt_entry, mr);
2185 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
2186 struct hns_roce_mr *mr, int flags,
2187 u32 pdn, int mr_access_flags, u64 iova,
2188 u64 size, void *mb_buf)
2190 struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
2193 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2194 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
2196 if (flags & IB_MR_REREG_PD) {
2197 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2198 V2_MPT_BYTE_4_PD_S, pdn);
2202 if (flags & IB_MR_REREG_ACCESS) {
2203 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
2204 V2_MPT_BYTE_8_BIND_EN_S,
2205 (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
2206 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
2207 V2_MPT_BYTE_8_ATOMIC_EN_S,
2208 mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
2209 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
2210 mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
2211 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
2212 mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
2213 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
2214 mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
2217 if (flags & IB_MR_REREG_TRANS) {
2218 mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
2219 mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
2220 mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
2221 mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
2226 ret = set_mtpt_pbl(mpt_entry, mr);
2232 static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
2234 struct hns_roce_v2_mpt_entry *mpt_entry;
2237 memset(mpt_entry, 0, sizeof(*mpt_entry));
2239 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2240 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
2241 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2242 V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
2243 roce_set_field(mpt_entry->byte_4_pd_hop_st,
2244 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2245 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2246 mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2247 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2248 V2_MPT_BYTE_4_PD_S, mr->pd);
2250 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
2251 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
2252 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2254 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
2255 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
2256 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
2257 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
2259 mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
2261 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
2262 roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
2263 V2_MPT_BYTE_48_PBL_BA_H_S,
2264 upper_32_bits(mr->pbl_ba >> 3));
2266 roce_set_field(mpt_entry->byte_64_buf_pa1,
2267 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2268 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2269 mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
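/*
 * Build the MPT entry for a memory window; the BQP bit is set only for
 * type-2 windows, which are bound to a QP.
 */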
2274 static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
2276 struct hns_roce_v2_mpt_entry *mpt_entry;
2279 memset(mpt_entry, 0, sizeof(*mpt_entry));
2281 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2282 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
2283 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2284 V2_MPT_BYTE_4_PD_S, mw->pdn);
2285 roce_set_field(mpt_entry->byte_4_pd_hop_st,
2286 V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2287 V2_MPT_BYTE_4_PBL_HOP_NUM_S,
2288 mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ?
2289 0 : mw->pbl_hop_num);
2290 roce_set_field(mpt_entry->byte_4_pd_hop_st,
2291 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2292 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2293 mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2295 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
2296 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2298 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
2299 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
2300 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
2301 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
2302 mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
2304 roce_set_field(mpt_entry->byte_64_buf_pa1,
2305 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2306 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2307 mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2309 mpt_entry->lkey = cpu_to_le32(mw->rkey);
2314 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2316 return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
2317 n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
2320 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2322 struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
2324 /* Return the CQE only when its owner bit differs from the MSB of cons_index */
2325 return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
2326 !!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
2329 static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
2331 return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
2334 static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
2336 return hns_roce_buf_offset(&srq->buf, n << srq->wqe_shift);
2339 static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
2344 /* always called with interrupts disabled. */
2345 spin_lock(&srq->lock);
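/* wqe_index selects a bit in the idx_que bitmap: one u64 word covers 64 indexes */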
2347 bitmap_num = wqe_index / (sizeof(u64) * 8);
2348 bit_num = wqe_index % (sizeof(u64) * 8);
2349 srq->idx_que.bitmap[bitmap_num] |= (1ULL << bit_num);
2352 spin_unlock(&srq->lock);
2355 static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
2357 *hr_cq->set_ci_db = cons_index & 0xffffff;
2360 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2361 struct hns_roce_srq *srq)
2363 struct hns_roce_v2_cqe *cqe, *dest;
2369 for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
2371 if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
2376 * Now walk backwards through the CQ, removing CQ entries that
2377 * match our QP by overwriting them with the entries that come after them.
2379 while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
2380 cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
2381 if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
2382 V2_CQE_BYTE_16_LCL_QPN_S) &
2383 HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
2385 roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S)) {
2386 wqe_index = roce_get_field(cqe->byte_4,
2387 V2_CQE_BYTE_4_WQE_INDX_M,
2388 V2_CQE_BYTE_4_WQE_INDX_S);
2389 hns_roce_free_srq_wqe(srq, wqe_index);
2392 } else if (nfreed) {
2393 dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
2395 owner_bit = roce_get_bit(dest->byte_4,
2396 V2_CQE_BYTE_4_OWNER_S);
2397 memcpy(dest, cqe, sizeof(*cqe));
2398 roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
2404 hr_cq->cons_index += nfreed;
2406 * Make sure update of buffer contents is done before
2407 * updating consumer index.
2410 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
2414 static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2415 struct hns_roce_srq *srq)
2417 spin_lock_irq(&hr_cq->lock);
2418 __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
2419 spin_unlock_irq(&hr_cq->lock);
2422 static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
2423 struct hns_roce_cq *hr_cq, void *mb_buf,
2424 u64 *mtts, dma_addr_t dma_handle, int nent,
2427 struct hns_roce_v2_cq_context *cq_context;
2429 cq_context = mb_buf;
2430 memset(cq_context, 0, sizeof(*cq_context));
2432 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
2433 V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
2434 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
2435 V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
2436 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
2437 V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
2438 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
2439 V2_CQC_BYTE_4_CEQN_S, vector);
2440 cq_context->byte_4_pg_ceqn = cpu_to_le32(cq_context->byte_4_pg_ceqn);
2442 roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
2443 V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
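/*
 * mtts[0] and mtts[1] are the DMA addresses of the current and next CQE
 * buffer blocks; each is split into a low 32-bit word plus high bits
 * carried in the byte_16/byte_24 fields.
 */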
2445 cq_context->cqe_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
2446 cq_context->cqe_cur_blk_addr =
2447 cpu_to_le32(cq_context->cqe_cur_blk_addr);
2449 roce_set_field(cq_context->byte_16_hop_addr,
2450 V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
2451 V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
2452 cpu_to_le32((mtts[0]) >> (32 + PAGE_ADDR_SHIFT)));
2453 roce_set_field(cq_context->byte_16_hop_addr,
2454 V2_CQC_BYTE_16_CQE_HOP_NUM_M,
2455 V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
2456 HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
2458 cq_context->cqe_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT);
2459 roce_set_field(cq_context->byte_24_pgsz_addr,
2460 V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
2461 V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
2462 cpu_to_le32((mtts[1]) >> (32 + PAGE_ADDR_SHIFT)));
2463 roce_set_field(cq_context->byte_24_pgsz_addr,
2464 V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
2465 V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
2466 hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
2467 roce_set_field(cq_context->byte_24_pgsz_addr,
2468 V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
2469 V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
2470 hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);
2472 cq_context->cqe_ba = (u32)(dma_handle >> 3);
2474 roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
2475 V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
2478 roce_set_bit(cq_context->byte_44_db_record,
2479 V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);
2481 roce_set_field(cq_context->byte_44_db_record,
2482 V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
2483 V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
2484 ((u32)hr_cq->db.dma) >> 1);
2485 cq_context->db_record_addr = hr_cq->db.dma >> 32;
2487 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
2488 V2_CQC_BYTE_56_CQ_MAX_CNT_M,
2489 V2_CQC_BYTE_56_CQ_MAX_CNT_S,
2490 HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
2491 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
2492 V2_CQC_BYTE_56_CQ_PERIOD_M,
2493 V2_CQC_BYTE_56_CQ_PERIOD_S,
2494 HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
2497 static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
2498 enum ib_cq_notify_flags flags)
2500 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
2501 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2502 u32 notification_flag;
2508 notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
2509 V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
2511 * flags = 0: notification flag = 1, notify on next completion
2512 * flags = 1: notification flag = 0, notify on solicited completion
2514 roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
2516 roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
2517 HNS_ROCE_V2_CQ_DB_NTR);
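/* The consumer index is reported to hardware modulo twice the CQ depth */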
2518 roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
2519 V2_CQ_DB_PARAMETER_CONS_IDX_S,
2520 hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
2521 roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
2522 V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
2523 roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
2526 hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);
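/*
 * For an RQ-inline receive, the payload was placed in the receive WQE
 * buffer itself; copy it back into the buffers recorded in the WR's
 * scatter list (rq_inl_buf).
 */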
2531 static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
2532 struct hns_roce_qp **cur_qp,
2535 struct hns_roce_rinl_sge *sge_list;
2536 u32 wr_num, wr_cnt, sge_num;
2537 u32 sge_cnt, data_len, size;
2540 wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
2541 V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
2542 wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
2544 sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
2545 sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
2546 wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
2547 data_len = wc->byte_len;
2549 for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
2550 size = min(sge_list[sge_cnt].len, data_len);
2551 memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
2558 wc->status = IB_WC_LOC_LEN_ERR;
2565 static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
2566 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
2568 struct hns_roce_srq *srq = NULL;
2569 struct hns_roce_dev *hr_dev;
2570 struct hns_roce_v2_cqe *cqe;
2571 struct hns_roce_qp *hr_qp;
2572 struct hns_roce_wq *wq;
2573 struct ib_qp_attr attr;
2582 /* Find cqe according to consumer index */
2583 cqe = next_cqe_sw_v2(hr_cq);
2587 ++hr_cq->cons_index;
2588 /* Memory barrier */
2592 is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
2594 qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
2595 V2_CQE_BYTE_16_LCL_QPN_S);
2597 if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
2598 hr_dev = to_hr_dev(hr_cq->ib_cq.device);
2599 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
2600 if (unlikely(!hr_qp)) {
2601 dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
2602 hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
2608 wc->qp = &(*cur_qp)->ibqp;
2612 wq = &(*cur_qp)->sq;
2613 if ((*cur_qp)->sq_signal_bits) {
2615 * If sq_signal_bits is set, first move the tail pointer
2616 * to the WQE that the current CQE corresponds to
2619 wqe_ctr = (u16)roce_get_field(cqe->byte_4,
2620 V2_CQE_BYTE_4_WQE_INDX_M,
2621 V2_CQE_BYTE_4_WQE_INDX_S);
2622 wq->tail += (wqe_ctr - (u16)wq->tail) &
2626 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2628 } else if ((*cur_qp)->ibqp.srq) {
2629 srq = to_hr_srq((*cur_qp)->ibqp.srq);
2630 wqe_ctr = le16_to_cpu(roce_get_field(cqe->byte_4,
2631 V2_CQE_BYTE_4_WQE_INDX_M,
2632 V2_CQE_BYTE_4_WQE_INDX_S));
2633 wc->wr_id = srq->wrid[wqe_ctr];
2634 hns_roce_free_srq_wqe(srq, wqe_ctr);
2636 /* Update tail pointer, record wr_id */
2637 wq = &(*cur_qp)->rq;
2638 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
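/* Translate the hardware CQE status into an IB work completion status */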
2642 status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
2643 V2_CQE_BYTE_4_STATUS_S);
2644 switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
2645 case HNS_ROCE_CQE_V2_SUCCESS:
2646 wc->status = IB_WC_SUCCESS;
2648 case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
2649 wc->status = IB_WC_LOC_LEN_ERR;
2651 case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
2652 wc->status = IB_WC_LOC_QP_OP_ERR;
2654 case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
2655 wc->status = IB_WC_LOC_PROT_ERR;
2657 case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
2658 wc->status = IB_WC_WR_FLUSH_ERR;
2660 case HNS_ROCE_CQE_V2_MW_BIND_ERR:
2661 wc->status = IB_WC_MW_BIND_ERR;
2663 case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
2664 wc->status = IB_WC_BAD_RESP_ERR;
2666 case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
2667 wc->status = IB_WC_LOC_ACCESS_ERR;
2669 case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
2670 wc->status = IB_WC_REM_INV_REQ_ERR;
2672 case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
2673 wc->status = IB_WC_REM_ACCESS_ERR;
2675 case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
2676 wc->status = IB_WC_REM_OP_ERR;
2678 case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
2679 wc->status = IB_WC_RETRY_EXC_ERR;
2681 case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
2682 wc->status = IB_WC_RNR_RETRY_EXC_ERR;
2684 case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
2685 wc->status = IB_WC_REM_ABORT_ERR;
2688 wc->status = IB_WC_GENERAL_ERR;
2692 /* flush cqe if wc status is error, excluding flush error */
2693 if ((wc->status != IB_WC_SUCCESS) &&
2694 (wc->status != IB_WC_WR_FLUSH_ERR)) {
2695 attr_mask = IB_QP_STATE;
2696 attr.qp_state = IB_QPS_ERR;
2697 return hns_roce_v2_modify_qp(&(*cur_qp)->ibqp,
2699 (*cur_qp)->state, IB_QPS_ERR);
2702 if (wc->status == IB_WC_WR_FLUSH_ERR)
2707 /* The CQE corresponds to the SQ */
2708 switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
2709 V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
2710 case HNS_ROCE_SQ_OPCODE_SEND:
2711 wc->opcode = IB_WC_SEND;
2713 case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV:
2714 wc->opcode = IB_WC_SEND;
2716 case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM:
2717 wc->opcode = IB_WC_SEND;
2718 wc->wc_flags |= IB_WC_WITH_IMM;
2720 case HNS_ROCE_SQ_OPCODE_RDMA_READ:
2721 wc->opcode = IB_WC_RDMA_READ;
2722 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2724 case HNS_ROCE_SQ_OPCODE_RDMA_WRITE:
2725 wc->opcode = IB_WC_RDMA_WRITE;
2727 case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM:
2728 wc->opcode = IB_WC_RDMA_WRITE;
2729 wc->wc_flags |= IB_WC_WITH_IMM;
2731 case HNS_ROCE_SQ_OPCODE_LOCAL_INV:
2732 wc->opcode = IB_WC_LOCAL_INV;
2733 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2735 case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP:
2736 wc->opcode = IB_WC_COMP_SWAP;
2739 case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD:
2740 wc->opcode = IB_WC_FETCH_ADD;
2743 case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP:
2744 wc->opcode = IB_WC_MASKED_COMP_SWAP;
2747 case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD:
2748 wc->opcode = IB_WC_MASKED_FETCH_ADD;
2751 case HNS_ROCE_SQ_OPCODE_FAST_REG_WR:
2752 wc->opcode = IB_WC_REG_MR;
2754 case HNS_ROCE_SQ_OPCODE_BIND_MW:
2755 wc->opcode = IB_WC_REG_MR;
2758 wc->status = IB_WC_GENERAL_ERR;
2762 /* The CQE corresponds to the RQ */
2763 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2765 opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
2766 V2_CQE_BYTE_4_OPCODE_S);
2767 switch (opcode & 0x1f) {
2768 case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
2769 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2770 wc->wc_flags = IB_WC_WITH_IMM;
2772 cpu_to_be32(le32_to_cpu(cqe->immtdata));
2774 case HNS_ROCE_V2_OPCODE_SEND:
2775 wc->opcode = IB_WC_RECV;
2778 case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
2779 wc->opcode = IB_WC_RECV;
2780 wc->wc_flags = IB_WC_WITH_IMM;
2782 cpu_to_be32(le32_to_cpu(cqe->immtdata));
2784 case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
2785 wc->opcode = IB_WC_RECV;
2786 wc->wc_flags = IB_WC_WITH_INVALIDATE;
2787 wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
2790 wc->status = IB_WC_GENERAL_ERR;
2794 if ((wc->qp->qp_type == IB_QPT_RC ||
2795 wc->qp->qp_type == IB_QPT_UC) &&
2796 (opcode == HNS_ROCE_V2_OPCODE_SEND ||
2797 opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
2798 opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
2799 (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
2800 ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
2805 wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
2806 V2_CQE_BYTE_32_SL_S);
2807 wc->src_qp = (u8)roce_get_field(cqe->byte_32,
2808 V2_CQE_BYTE_32_RMT_QPN_M,
2809 V2_CQE_BYTE_32_RMT_QPN_S);
2811 wc->wc_flags |= (roce_get_bit(cqe->byte_32,
2812 V2_CQE_BYTE_32_GRH_S) ?
2814 wc->port_num = roce_get_field(cqe->byte_32,
2815 V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
2817 memcpy(wc->smac, cqe->smac, 4);
2818 wc->smac[4] = roce_get_field(cqe->byte_28,
2819 V2_CQE_BYTE_28_SMAC_4_M,
2820 V2_CQE_BYTE_28_SMAC_4_S);
2821 wc->smac[5] = roce_get_field(cqe->byte_28,
2822 V2_CQE_BYTE_28_SMAC_5_M,
2823 V2_CQE_BYTE_28_SMAC_5_S);
2824 if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
2825 wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
2826 V2_CQE_BYTE_28_VID_M,
2827 V2_CQE_BYTE_28_VID_S);
2829 wc->vlan_id = 0xffff;
2832 wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
2833 wc->network_hdr_type = roce_get_field(cqe->byte_28,
2834 V2_CQE_BYTE_28_PORT_TYPE_M,
2835 V2_CQE_BYTE_28_PORT_TYPE_S);
2841 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
2844 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2845 struct hns_roce_qp *cur_qp = NULL;
2846 unsigned long flags;
2849 spin_lock_irqsave(&hr_cq->lock, flags);
2851 for (npolled = 0; npolled < num_entries; ++npolled) {
2852 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
2857 /* Memory barrier */
2859 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
2862 spin_unlock_irqrestore(&hr_cq->lock, flags);
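/*
 * Program one level of the hardware base-address table for a HEM object:
 * a mailbox command writes either the L0/L1 BT address or the HEM page
 * addresses, depending on step_idx and the table's hop number.
 */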
2867 static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
2868 struct hns_roce_hem_table *table, int obj,
2871 struct device *dev = hr_dev->dev;
2872 struct hns_roce_cmd_mailbox *mailbox;
2873 struct hns_roce_hem_iter iter;
2874 struct hns_roce_hem_mhop mhop;
2875 struct hns_roce_hem *hem;
2876 unsigned long mhop_obj = obj;
2886 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
2889 hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
2893 hop_num = mhop.hop_num;
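/* Each base-address (BT) chunk holds bt_chunk_size / 8 64-bit addresses */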
2894 chunk_ba_num = mhop.bt_chunk_size / 8;
2897 hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
2899 l1_idx = i * chunk_ba_num + j;
2900 } else if (hop_num == 1) {
2901 hem_idx = i * chunk_ba_num + j;
2902 } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
2906 switch (table->type) {
2908 op = HNS_ROCE_CMD_WRITE_QPC_BT0;
2911 op = HNS_ROCE_CMD_WRITE_MPT_BT0;
2914 op = HNS_ROCE_CMD_WRITE_CQC_BT0;
2917 op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
2920 op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
2922 case HEM_TYPE_QPC_TIMER:
2923 op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
2925 case HEM_TYPE_CQC_TIMER:
2926 op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
2929 dev_warn(dev, "Table %d is not written through the mailbox!\n",
2934 if (table->type == HEM_TYPE_SCCC && step_idx)
2939 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2940 if (IS_ERR(mailbox))
2941 return PTR_ERR(mailbox);
2943 if (table->type == HEM_TYPE_SCCC)
2946 if (check_whether_last_step(hop_num, step_idx)) {
2947 hem = table->hem[hem_idx];
2948 for (hns_roce_hem_first(hem, &iter);
2949 !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
2950 bt_ba = hns_roce_hem_addr(&iter);
2952 /* configure the ba, tag, and op */
2953 ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
2955 HNS_ROCE_CMD_TIMEOUT_MSECS);
2959 bt_ba = table->bt_l0_dma_addr[i];
2960 else if (step_idx == 1 && hop_num == 2)
2961 bt_ba = table->bt_l1_dma_addr[l1_idx];
2963 /* configure the ba, tag, and op */
2964 ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
2965 0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
2968 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2972 static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
2973 struct hns_roce_hem_table *table, int obj,
2976 struct device *dev = hr_dev->dev;
2977 struct hns_roce_cmd_mailbox *mailbox;
2981 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
2984 switch (table->type) {
2986 op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
2989 op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
2992 op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
2995 case HEM_TYPE_QPC_TIMER:
2996 case HEM_TYPE_CQC_TIMER:
2999 op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
3002 dev_warn(dev, "Table %d is not destroyed through the mailbox!\n",
3007 if (table->type == HEM_TYPE_SCCC ||
3008 table->type == HEM_TYPE_QPC_TIMER ||
3009 table->type == HEM_TYPE_CQC_TIMER)
3014 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3015 if (IS_ERR(mailbox))
3016 return PTR_ERR(mailbox);
3018 /* configure the tag and op */
3019 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
3020 HNS_ROCE_CMD_TIMEOUT_MSECS);
3022 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3026 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
3027 struct hns_roce_mtt *mtt,
3028 enum ib_qp_state cur_state,
3029 enum ib_qp_state new_state,
3030 struct hns_roce_v2_qp_context *context,
3031 struct hns_roce_qp *hr_qp)
3033 struct hns_roce_cmd_mailbox *mailbox;
3036 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3037 if (IS_ERR(mailbox))
3038 return PTR_ERR(mailbox);
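/*
 * The mailbox buffer holds the QP context followed immediately by the
 * context mask, hence twice the context size is copied.
 */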
3040 memcpy(mailbox->buf, context, sizeof(*context) * 2);
3042 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
3043 HNS_ROCE_CMD_MODIFY_QPC,
3044 HNS_ROCE_CMD_TIMEOUT_MSECS);
3046 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3051 static void set_access_flags(struct hns_roce_qp *hr_qp,
3052 struct hns_roce_v2_qp_context *context,
3053 struct hns_roce_v2_qp_context *qpc_mask,
3054 const struct ib_qp_attr *attr, int attr_mask)
3059 dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
3060 attr->max_dest_rd_atomic : hr_qp->resp_depth;
3062 access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
3063 attr->qp_access_flags : hr_qp->atomic_rd_en;
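/* Without destination rd_atomic resources, keep only the remote write permission */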
3065 if (!dest_rd_atomic)
3066 access_flags &= IB_ACCESS_REMOTE_WRITE;
3068 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3069 !!(access_flags & IB_ACCESS_REMOTE_READ));
3070 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
3072 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3073 !!(access_flags & IB_ACCESS_REMOTE_WRITE));
3074 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
3076 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3077 !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
3078 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
3081 static void modify_qp_reset_to_init(struct ib_qp *ibqp,
3082 const struct ib_qp_attr *attr,
3084 struct hns_roce_v2_qp_context *context,
3085 struct hns_roce_v2_qp_context *qpc_mask)
3087 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3088 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3091 * In the v2 engine, software passes both a context and a context mask to
3092 * the hardware when modifying a QP. For every field that software wants to
3093 * modify, all bits of that field in the context mask must be cleared to 0
3094 * at the same time; all other mask bits are left as 0x1.
3096 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3097 V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3098 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3099 V2_QPC_BYTE_4_TST_S, 0);
3101 if (ibqp->qp_type == IB_QPT_GSI)
3102 roce_set_field(context->byte_4_sqpn_tst,
3103 V2_QPC_BYTE_4_SGE_SHIFT_M,
3104 V2_QPC_BYTE_4_SGE_SHIFT_S,
3105 ilog2((unsigned int)hr_qp->sge.sge_cnt));
3107 roce_set_field(context->byte_4_sqpn_tst,
3108 V2_QPC_BYTE_4_SGE_SHIFT_M,
3109 V2_QPC_BYTE_4_SGE_SHIFT_S,
3110 hr_qp->sq.max_gs > 2 ?
3111 ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
3113 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
3114 V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
3116 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3117 V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3118 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3119 V2_QPC_BYTE_4_SQPN_S, 0);
3121 roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3122 V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3123 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3124 V2_QPC_BYTE_16_PD_S, 0);
3126 roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
3127 V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
3128 roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
3129 V2_QPC_BYTE_20_RQWS_S, 0);
3131 roce_set_field(context->byte_20_smac_sgid_idx,
3132 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
3133 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
3134 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3135 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
3137 roce_set_field(context->byte_20_smac_sgid_idx,
3138 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
3139 (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
3140 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || ibqp->srq) ? 0 :
3141 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
3142 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3143 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
3145 /* When there is no VLAN, the VLAN ID must be set to 0xFFF */
3146 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
3147 V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
3148 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
3149 V2_QPC_BYTE_24_VLAN_ID_S, 0);
3152 * Clear some fields through the context mask only. Because every field in
3153 * the context defaults to zero, there is no need to write 0 into the context
3154 * again; clearing the relevant bits of the context mask is enough.
3156 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
3157 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
3158 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
3159 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
3161 roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_TEMPID_M,
3162 V2_QPC_BYTE_60_TEMPID_S, 0);
3164 roce_set_field(qpc_mask->byte_60_qpst_tempid,
3165 V2_QPC_BYTE_60_SCC_TOKEN_M, V2_QPC_BYTE_60_SCC_TOKEN_S,
3167 roce_set_bit(qpc_mask->byte_60_qpst_tempid,
3168 V2_QPC_BYTE_60_SQ_DB_DOING_S, 0);
3169 roce_set_bit(qpc_mask->byte_60_qpst_tempid,
3170 V2_QPC_BYTE_60_RQ_DB_DOING_S, 0);
3171 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
3172 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
3174 if (attr_mask & IB_QP_QKEY) {
3175 context->qkey_xrcd = attr->qkey;
3176 qpc_mask->qkey_xrcd = 0;
3177 hr_qp->qkey = attr->qkey;
3180 if (hr_qp->rdb_en) {
3181 roce_set_bit(context->byte_68_rq_db,
3182 V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
3183 roce_set_bit(qpc_mask->byte_68_rq_db,
3184 V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0);
3187 roce_set_field(context->byte_68_rq_db,
3188 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
3189 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
3190 ((u32)hr_qp->rdb.dma) >> 1);
3191 roce_set_field(qpc_mask->byte_68_rq_db,
3192 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
3193 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0);
3194 context->rq_db_record_addr = hr_qp->rdb.dma >> 32;
3195 qpc_mask->rq_db_record_addr = 0;
3197 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
3198 (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
3199 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
3201 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3202 V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3203 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3204 V2_QPC_BYTE_80_RX_CQN_S, 0);
3206 roce_set_field(context->byte_76_srqn_op_en,
3207 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3208 to_hr_srq(ibqp->srq)->srqn);
3209 roce_set_field(qpc_mask->byte_76_srqn_op_en,
3210 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
3211 roce_set_bit(context->byte_76_srqn_op_en,
3212 V2_QPC_BYTE_76_SRQ_EN_S, 1);
3213 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3214 V2_QPC_BYTE_76_SRQ_EN_S, 0);
3217 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3218 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3219 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3220 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3221 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
3222 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
3224 roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
3225 V2_QPC_BYTE_92_SRQ_INFO_S, 0);
3227 roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
3228 V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
3230 roce_set_field(qpc_mask->byte_104_rq_sge,
3231 V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
3232 V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);
3234 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3235 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
3236 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3237 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
3238 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
3239 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3240 V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);
3242 qpc_mask->rq_rnr_timer = 0;
3243 qpc_mask->rx_msg_len = 0;
3244 qpc_mask->rx_rkey_pkt_info = 0;
3245 qpc_mask->rx_va = 0;
3247 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
3248 V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
3249 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
3250 V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
3252 roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S,
3254 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
3255 V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
3256 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
3257 V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);
3259 roce_set_field(qpc_mask->byte_144_raq,
3260 V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
3261 V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
3262 roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
3263 V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
3264 roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
3266 roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
3267 V2_QPC_BYTE_148_RQ_MSN_S, 0);
3268 roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
3269 V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);
3271 roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
3272 V2_QPC_BYTE_152_RAQ_PSN_S, 0);
3273 roce_set_field(qpc_mask->byte_152_raq,
3274 V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
3275 V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);
3277 roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
3278 V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);
3280 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
3281 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
3282 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
3283 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
3284 V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
3285 V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
3287 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3288 V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S, 0);
3289 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3290 V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S, 0);
3291 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3292 V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S, 0);
3293 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3294 V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
3295 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3296 V2_QPC_BYTE_168_SQ_INVLD_FLG_S, 0);
3297 roce_set_field(qpc_mask->byte_168_irrl_idx,
3298 V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
3299 V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);
3301 roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
3302 V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
3303 roce_set_field(qpc_mask->byte_172_sq_psn,
3304 V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
3305 V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
3307 roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
3310 roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
3311 roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 0);
3313 roce_set_field(qpc_mask->byte_176_msg_pktn,
3314 V2_QPC_BYTE_176_MSG_USE_PKTN_M,
3315 V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
3316 roce_set_field(qpc_mask->byte_176_msg_pktn,
3317 V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
3318 V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);
3320 roce_set_field(qpc_mask->byte_184_irrl_idx,
3321 V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
3322 V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);
3324 qpc_mask->cur_sge_offset = 0;
3326 roce_set_field(qpc_mask->byte_192_ext_sge,
3327 V2_QPC_BYTE_192_CUR_SGE_IDX_M,
3328 V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
3329 roce_set_field(qpc_mask->byte_192_ext_sge,
3330 V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
3331 V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);
3333 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
3334 V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
3336 roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
3337 V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
3338 roce_set_field(qpc_mask->byte_200_sq_max,
3339 V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
3340 V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);
3342 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
3343 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);
3345 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
3346 V2_QPC_BYTE_212_CHECK_FLG_S, 0);
3348 qpc_mask->sq_timer = 0;
3350 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3351 V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
3352 V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
3353 roce_set_field(qpc_mask->byte_232_irrl_sge,
3354 V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
3355 V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
3357 roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_SO_LP_VLD_S,
3359 roce_set_bit(qpc_mask->byte_232_irrl_sge,
3360 V2_QPC_BYTE_232_FENCE_LP_VLD_S, 0);
3361 roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_IRRL_LP_VLD_S,
3364 qpc_mask->irrl_cur_sge_offset = 0;
3366 roce_set_field(qpc_mask->byte_240_irrl_tail,
3367 V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
3368 V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
3369 roce_set_field(qpc_mask->byte_240_irrl_tail,
3370 V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
3371 V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
3372 roce_set_field(qpc_mask->byte_240_irrl_tail,
3373 V2_QPC_BYTE_240_RX_ACK_MSN_M,
3374 V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
3376 roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
3377 V2_QPC_BYTE_248_IRRL_PSN_S, 0);
3378 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
3380 roce_set_field(qpc_mask->byte_248_ack_psn,
3381 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
3382 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
3383 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
3385 roce_set_bit(qpc_mask->byte_248_ack_psn,
3386 V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
3387 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
3390 hr_qp->access_flags = attr->qp_access_flags;
3391 hr_qp->pkey_index = attr->pkey_index;
3392 roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3393 V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3394 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3395 V2_QPC_BYTE_252_TX_CQN_S, 0);
3397 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
3398 V2_QPC_BYTE_252_ERR_TYPE_S, 0);
3400 roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
3401 V2_QPC_BYTE_256_RQ_CQE_IDX_M,
3402 V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
3403 roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
3404 V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
3405 V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
3408 static void modify_qp_init_to_init(struct ib_qp *ibqp,
3409 const struct ib_qp_attr *attr, int attr_mask,
3410 struct hns_roce_v2_qp_context *context,
3411 struct hns_roce_v2_qp_context *qpc_mask)
3413 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3416 * In the v2 engine, software passes both a context and a context mask to
3417 * the hardware when modifying a QP. For every field that software wants to
3418 * modify, all bits of that field in the context mask must be cleared to 0
3419 * at the same time; all other mask bits are left as 0x1.
3421 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3422 V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3423 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3424 V2_QPC_BYTE_4_TST_S, 0);
3426 if (ibqp->qp_type == IB_QPT_GSI)
3427 roce_set_field(context->byte_4_sqpn_tst,
3428 V2_QPC_BYTE_4_SGE_SHIFT_M,
3429 V2_QPC_BYTE_4_SGE_SHIFT_S,
3430 ilog2((unsigned int)hr_qp->sge.sge_cnt));
3432 roce_set_field(context->byte_4_sqpn_tst,
3433 V2_QPC_BYTE_4_SGE_SHIFT_M,
3434 V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
3435 ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
3437 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
3438 V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
3440 if (attr_mask & IB_QP_ACCESS_FLAGS) {
3441 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3442 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
3443 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3446 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3447 !!(attr->qp_access_flags &
3448 IB_ACCESS_REMOTE_WRITE));
3449 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3452 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3453 !!(attr->qp_access_flags &
3454 IB_ACCESS_REMOTE_ATOMIC));
3455 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3458 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3459 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
3460 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3463 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3464 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
3465 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3468 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3469 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
3470 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3474 roce_set_field(context->byte_20_smac_sgid_idx,
3475 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
3476 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
3477 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3478 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
3480 roce_set_field(context->byte_20_smac_sgid_idx,
3481 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
3482 (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
3483 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || ibqp->srq) ? 0 :
3484 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
3485 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3486 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
3488 roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3489 V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3490 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3491 V2_QPC_BYTE_16_PD_S, 0);
3493 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3494 V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3495 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3496 V2_QPC_BYTE_80_RX_CQN_S, 0);
3498 roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3499 V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3500 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3501 V2_QPC_BYTE_252_TX_CQN_S, 0);
3504 roce_set_bit(context->byte_76_srqn_op_en,
3505 V2_QPC_BYTE_76_SRQ_EN_S, 1);
3506 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3507 V2_QPC_BYTE_76_SRQ_EN_S, 0);
3508 roce_set_field(context->byte_76_srqn_op_en,
3509 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3510 to_hr_srq(ibqp->srq)->srqn);
3511 roce_set_field(qpc_mask->byte_76_srqn_op_en,
3512 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
3515 if (attr_mask & IB_QP_QKEY) {
3516 context->qkey_xrcd = attr->qkey;
3517 qpc_mask->qkey_xrcd = 0;
3520 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3521 V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3522 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3523 V2_QPC_BYTE_4_SQPN_S, 0);
3525 if (attr_mask & IB_QP_DEST_QPN) {
3526 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
3527 V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
3528 roce_set_field(qpc_mask->byte_56_dqpn_err,
3529 V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
3533 static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
3534 const struct ib_qp_attr *attr, int attr_mask,
3535 struct hns_roce_v2_qp_context *context,
3536 struct hns_roce_v2_qp_context *qpc_mask)
3538 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
3539 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3540 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3541 struct device *dev = hr_dev->dev;
3542 dma_addr_t dma_handle_3;
3543 dma_addr_t dma_handle_2;
3544 dma_addr_t dma_handle;
3554 /* Search qp buf's mtts */
3555 mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
3556 hr_qp->mtt.first_seg, &dma_handle);
3558 dev_err(dev, "qp buf pa lookup failed\n");
3562 /* Search IRRL's mtts */
3563 mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
3564 hr_qp->qpn, &dma_handle_2);
3566 dev_err(dev, "qp irrl_table lookup failed\n");
3570 /* Search TRRL's mtts */
3571 mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
3572 hr_qp->qpn, &dma_handle_3);
3574 dev_err(dev, "qp trrl_table lookup failed\n");
3578 if (attr_mask & IB_QP_ALT_PATH) {
3579 dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
3583 dmac = (u8 *)attr->ah_attr.roce.dmac;
3584 context->wqe_sge_ba = (u32)(dma_handle >> 3);
3585 qpc_mask->wqe_sge_ba = 0;
3588 * In the v2 engine, software passes both a context and a context mask to
3589 * the hardware when modifying a QP. For every field that software wants to
3590 * modify, all bits of that field in the context mask must be cleared to 0
3591 * at the same time; all other mask bits are left as 0x1.
3593 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3594 V2_QPC_BYTE_12_WQE_SGE_BA_S, dma_handle >> (32 + 3));
3595 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3596 V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
3598 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3599 V2_QPC_BYTE_12_SQ_HOP_NUM_S,
3600 hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
3601 0 : hr_dev->caps.mtt_hop_num);
3602 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3603 V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
3605 roce_set_field(context->byte_20_smac_sgid_idx,
3606 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
3607 V2_QPC_BYTE_20_SGE_HOP_NUM_S,
3608 ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
3609 hr_dev->caps.mtt_hop_num : 0);
3610 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3611 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
3612 V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
3614 roce_set_field(context->byte_20_smac_sgid_idx,
3615 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3616 V2_QPC_BYTE_20_RQ_HOP_NUM_S,
3617 hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
3618 0 : hr_dev->caps.mtt_hop_num);
3619 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3620 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3621 V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
3623 roce_set_field(context->byte_16_buf_ba_pg_sz,
3624 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3625 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
3626 hr_dev->caps.mtt_ba_pg_sz + PG_SHIFT_OFFSET);
3627 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3628 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3629 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
3631 roce_set_field(context->byte_16_buf_ba_pg_sz,
3632 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3633 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
3634 hr_dev->caps.mtt_buf_pg_sz + PG_SHIFT_OFFSET);
3635 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3636 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3637 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
3639 roce_set_field(context->byte_80_rnr_rx_cqn,
3640 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
3641 V2_QPC_BYTE_80_MIN_RNR_TIME_S, attr->min_rnr_timer);
3642 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
3643 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
3644 V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
3646 page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
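/*
 * Point the QPC at the current and next RQ buffer blocks, located through
 * the MTT at the RQ's offset within the QP buffer.
 */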
3647 context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size]
3648 >> PAGE_ADDR_SHIFT);
3649 qpc_mask->rq_cur_blk_addr = 0;
3651 roce_set_field(context->byte_92_srq_info,
3652 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3653 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
3654 mtts[hr_qp->rq.offset / page_size]
3655 >> (32 + PAGE_ADDR_SHIFT));
3656 roce_set_field(qpc_mask->byte_92_srq_info,
3657 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3658 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
3660 context->rq_nxt_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size + 1]
3661 >> PAGE_ADDR_SHIFT);
3662 qpc_mask->rq_nxt_blk_addr = 0;
3664 roce_set_field(context->byte_104_rq_sge,
3665 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3666 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
3667 mtts[hr_qp->rq.offset / page_size + 1]
3668 >> (32 + PAGE_ADDR_SHIFT));
3669 roce_set_field(qpc_mask->byte_104_rq_sge,
3670 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3671 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
3673 roce_set_field(context->byte_108_rx_reqepsn,
3674 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
3675 V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
3676 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3677 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
3678 V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
3680 roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3681 V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
3682 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3683 V2_QPC_BYTE_132_TRRL_BA_S, 0);
3684 context->trrl_ba = (u32)(dma_handle_3 >> (16 + 4));
3685 qpc_mask->trrl_ba = 0;
3686 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3687 V2_QPC_BYTE_140_TRRL_BA_S,
3688 (u32)(dma_handle_3 >> (32 + 16 + 4)));
3689 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3690 V2_QPC_BYTE_140_TRRL_BA_S, 0);
3692 context->irrl_ba = (u32)(dma_handle_2 >> 6);
3693 qpc_mask->irrl_ba = 0;
3694 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3695 V2_QPC_BYTE_208_IRRL_BA_S,
3696 dma_handle_2 >> (32 + 6));
3697 roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3698 V2_QPC_BYTE_208_IRRL_BA_S, 0);
3700 roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
3701 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
3703 roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3704 hr_qp->sq_signal_bits);
3705 roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3708 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
3710 smac = (u8 *)hr_dev->dev_addr[port];
3711 /* When dmac equals smac, or loop_idc is 1, loopback should be enabled */
3712 if (ether_addr_equal_unaligned(dmac, smac) ||
3713 hr_dev->loop_idc == 0x1) {
3714 roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
3715 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
3718 if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
3719 attr->max_dest_rd_atomic) {
3720 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
3721 V2_QPC_BYTE_140_RR_MAX_S,
3722 fls(attr->max_dest_rd_atomic - 1));
3723 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
3724 V2_QPC_BYTE_140_RR_MAX_S, 0);
3727 if (attr_mask & IB_QP_DEST_QPN) {
3728 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
3729 V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
3730 roce_set_field(qpc_mask->byte_56_dqpn_err,
3731 V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
3734 /* Configure GID index */
3735 port_num = rdma_ah_get_port_num(&attr->ah_attr);
3736 roce_set_field(context->byte_20_smac_sgid_idx,
3737 V2_QPC_BYTE_20_SGID_IDX_M,
3738 V2_QPC_BYTE_20_SGID_IDX_S,
3739 hns_get_gid_index(hr_dev, port_num - 1,
3741 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3742 V2_QPC_BYTE_20_SGID_IDX_M,
3743 V2_QPC_BYTE_20_SGID_IDX_S, 0);
3744 memcpy(&(context->dmac), dmac, 4);
3745 roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3746 V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
3748 roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3749 V2_QPC_BYTE_52_DMAC_S, 0);
3751 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3752 V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
3753 roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3754 V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
3756 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
3757 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3758 V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
3759 else if (attr_mask & IB_QP_PATH_MTU)
3760 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3761 V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
3763 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3764 V2_QPC_BYTE_24_MTU_S, 0);
3766 roce_set_field(context->byte_84_rq_ci_pi,
3767 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3768 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
3769 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3770 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3771 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3773 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3774 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
3775 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
3776 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3777 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
3778 roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
3779 V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
3780 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3781 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
3782 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
3784 context->rq_rnr_timer = 0;
3785 qpc_mask->rq_rnr_timer = 0;
3787 roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
3788 V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
3789 roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
3790 V2_QPC_BYTE_152_RAQ_PSN_S, 0);
3792 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
3793 V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
3794 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
3795 V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
3797 roce_set_field(context->byte_168_irrl_idx,
3798 V2_QPC_BYTE_168_LP_SGEN_INI_M,
3799 V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
3800 roce_set_field(qpc_mask->byte_168_irrl_idx,
3801 V2_QPC_BYTE_168_LP_SGEN_INI_M,
3802 V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
3807 static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
3808 const struct ib_qp_attr *attr, int attr_mask,
3809 struct hns_roce_v2_qp_context *context,
3810 struct hns_roce_v2_qp_context *qpc_mask)
3812 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3813 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3814 struct device *dev = hr_dev->dev;
3815 dma_addr_t dma_handle;
3819 /* Search qp buf's mtts */
3820 mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
3821 hr_qp->mtt.first_seg, &dma_handle);
3823 dev_err(dev, "qp buf pa lookup failed\n");
3827 /* Alternate path and path migration are not supported */
3828 if ((attr_mask & IB_QP_ALT_PATH) ||
3829 (attr_mask & IB_QP_PATH_MIG_STATE)) {
3830 dev_err(dev, "RTR2RTS attr_mask (0x%x) error\n", attr_mask);
3835 * In the v2 engine, software passes both a context and a context mask to
3836 * the hardware when modifying a QP. For every field that software wants to
3837 * modify, all bits of that field in the context mask must be cleared to 0
3838 * at the same time; all other mask bits are left as 0x1.
3840 context->sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
3841 roce_set_field(context->byte_168_irrl_idx,
3842 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
3843 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
3844 mtts[0] >> (32 + PAGE_ADDR_SHIFT));
3845 qpc_mask->sq_cur_blk_addr = 0;
3846 roce_set_field(qpc_mask->byte_168_irrl_idx,
3847 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
3848 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
3850 page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
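/* Only GSI QPs and QPs with sq.max_gs > 2 use an extended SGE region */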
3851 context->sq_cur_sge_blk_addr =
3852 ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
3853 ((u32)(mtts[hr_qp->sge.offset / page_size]
3854 >> PAGE_ADDR_SHIFT)) : 0;
3855 roce_set_field(context->byte_184_irrl_idx,
3856 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
3857 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
3858 ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
3859 (mtts[hr_qp->sge.offset / page_size] >>
3860 (32 + PAGE_ADDR_SHIFT)) : 0);
3861 qpc_mask->sq_cur_sge_blk_addr = 0;
3862 roce_set_field(qpc_mask->byte_184_irrl_idx,
3863 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
3864 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
3866 context->rx_sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
3867 roce_set_field(context->byte_232_irrl_sge,
3868 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
3869 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
3870 mtts[0] >> (32 + PAGE_ADDR_SHIFT));
3871 qpc_mask->rx_sq_cur_blk_addr = 0;
3872 roce_set_field(qpc_mask->byte_232_irrl_sge,
3873 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
3874 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
3877 * Set some fields in the context to zero. Because the default values of
3878 * all fields in the context are zero, we need not set them to 0 again,
3879 * but we should set the relevant fields of the context mask to 0.
3881 roce_set_field(qpc_mask->byte_232_irrl_sge,
3882 V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
3883 V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
3885 roce_set_field(qpc_mask->byte_240_irrl_tail,
3886 V2_QPC_BYTE_240_RX_ACK_MSN_M,
3887 V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
3889 roce_set_field(context->byte_244_rnr_rxack,
3890 V2_QPC_BYTE_244_RX_ACK_EPSN_M,
3891 V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
3892 roce_set_field(qpc_mask->byte_244_rnr_rxack,
3893 V2_QPC_BYTE_244_RX_ACK_EPSN_M,
3894 V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
3896 roce_set_field(qpc_mask->byte_248_ack_psn,
3897 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
3898 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
3899 roce_set_bit(qpc_mask->byte_248_ack_psn,
3900 V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
3901 roce_set_field(qpc_mask->byte_248_ack_psn,
3902 V2_QPC_BYTE_248_IRRL_PSN_M,
3903 V2_QPC_BYTE_248_IRRL_PSN_S, 0);
3905 roce_set_field(qpc_mask->byte_240_irrl_tail,
3906 V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
3907 V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
3909 roce_set_field(context->byte_220_retry_psn_msn,
3910 V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
3911 V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
3912 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3913 V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
3914 V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
3916 roce_set_field(context->byte_224_retry_msg,
3917 V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
3918 V2_QPC_BYTE_224_RETRY_MSG_PSN_S, attr->sq_psn >> 16);
3919 roce_set_field(qpc_mask->byte_224_retry_msg,
3920 V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
3921 V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
3923 roce_set_field(context->byte_224_retry_msg,
3924 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
3925 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, attr->sq_psn);
3926 roce_set_field(qpc_mask->byte_224_retry_msg,
3927 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
3928 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
3930 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3931 V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
3932 V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
3934 roce_set_bit(qpc_mask->byte_248_ack_psn,
3935 V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
3937 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
3938 V2_QPC_BYTE_212_CHECK_FLG_S, 0);
3940 roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
3941 V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
3942 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
3943 V2_QPC_BYTE_212_RETRY_CNT_S, 0);
3945 roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
3946 V2_QPC_BYTE_212_RETRY_NUM_INIT_S, attr->retry_cnt);
3947 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
3948 V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
3950 roce_set_field(context->byte_244_rnr_rxack,
3951 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
3952 V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
3953 roce_set_field(qpc_mask->byte_244_rnr_rxack,
3954 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
3955 V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
3957 roce_set_field(context->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
3958 V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
3959 roce_set_field(qpc_mask->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
3960 V2_QPC_BYTE_244_RNR_CNT_S, 0);
3962 roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
3963 V2_QPC_BYTE_212_LSN_S, 0x100);
3964 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
3965 V2_QPC_BYTE_212_LSN_S, 0);
3967 if (attr_mask & IB_QP_TIMEOUT) {
3968 if (attr->timeout < 31) {
3969 roce_set_field(context->byte_28_at_fl,
3970 V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
3972 roce_set_field(qpc_mask->byte_28_at_fl,
3973 V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
3976 dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n");
3980 roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
3981 V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
3982 roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
3983 V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
3985 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
3986 V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
3987 roce_set_field(context->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
3988 V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
3989 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
3990 V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
3992 if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
3993 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
3994 V2_QPC_BYTE_208_SR_MAX_S,
3995 fls(attr->max_rd_atomic - 1));
3996 roce_set_field(qpc_mask->byte_208_irrl,
3997 V2_QPC_BYTE_208_SR_MAX_M,
3998 V2_QPC_BYTE_208_SR_MAX_S, 0);
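/*
 * Transitions handled without extra context setup below: any non-RESET
 * state to ERR or RESET, RTS/SQD to RTS/SQD, and SQE back to RTS.
 */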
4003 static inline bool hns_roce_v2_check_qp_stat(enum ib_qp_state cur_state,
4004 enum ib_qp_state new_state)
4007 if ((cur_state != IB_QPS_RESET &&
4008 (new_state == IB_QPS_ERR || new_state == IB_QPS_RESET)) ||
4009 ((cur_state == IB_QPS_RTS || cur_state == IB_QPS_SQD) &&
4010 (new_state == IB_QPS_RTS || new_state == IB_QPS_SQD)) ||
4011 (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS))
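/*
 * Top-level QPC modify: build a context/mask pair, let the per-transition
 * helpers fill it, then hand both to hardware. The mask selects which
 * fields hardware actually updates.
 */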
4018 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
4019 const struct ib_qp_attr *attr,
4020 int attr_mask, enum ib_qp_state cur_state,
4021 enum ib_qp_state new_state)
4023 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4024 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4025 struct hns_roce_v2_qp_context *context;
4026 struct hns_roce_v2_qp_context *qpc_mask;
4027 struct device *dev = hr_dev->dev;
4030 context = kcalloc(2, sizeof(*context), GFP_ATOMIC);
4034 qpc_mask = context + 1;
4036 * In the v2 engine, software passes the context and the context mask to
4037 * hardware when modifying a qp. If software needs to modify some fields
4038 * in the context, all bits of the relevant fields in the context mask
4039 * should be set to 0 at the same time; otherwise they stay 0x1.
4041 memset(qpc_mask, 0xff, sizeof(*qpc_mask));
4042 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
4043 memset(qpc_mask, 0, sizeof(*qpc_mask));
4044 modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
4046 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
4047 modify_qp_init_to_init(ibqp, attr, attr_mask, context,
4049 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
4050 ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
4054 } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
4055 ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
4059 } else if (hns_roce_v2_check_qp_stat(cur_state, new_state)) {
4063 dev_err(dev, "Illegal state for QP!\n");
4068 /* When the QP state is ERR, the SQ and RQ WQEs should be flushed */
4069 if (new_state == IB_QPS_ERR) {
4070 roce_set_field(context->byte_160_sq_ci_pi,
4071 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
4072 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
4074 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
4075 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
4076 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
4079 roce_set_field(context->byte_84_rq_ci_pi,
4080 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4081 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
4083 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
4084 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4085 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
4089 if (attr_mask & IB_QP_AV) {
4090 const struct ib_global_route *grh =
4091 rdma_ah_read_grh(&attr->ah_attr);
4092 const struct ib_gid_attr *gid_attr = NULL;
4093 u8 src_mac[ETH_ALEN];
4094 int is_roce_protocol;
4099 ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num :
4101 hr_port = ib_port - 1;
4102 is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
4103 rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
4105 if (is_roce_protocol) {
4106 gid_attr = attr->ah_attr.grh.sgid_attr;
4107 vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev);
4108 memcpy(src_mac, gid_attr->ndev->dev_addr, ETH_ALEN);
4111 if (is_vlan_dev(gid_attr->ndev)) {
4112 roce_set_bit(context->byte_76_srqn_op_en,
4113 V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
4114 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
4115 V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
4116 roce_set_bit(context->byte_168_irrl_idx,
4117 V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
4118 roce_set_bit(qpc_mask->byte_168_irrl_idx,
4119 V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
4122 roce_set_field(context->byte_24_mtu_tc,
4123 V2_QPC_BYTE_24_VLAN_ID_M,
4124 V2_QPC_BYTE_24_VLAN_ID_S, vlan);
4125 roce_set_field(qpc_mask->byte_24_mtu_tc,
4126 V2_QPC_BYTE_24_VLAN_ID_M,
4127 V2_QPC_BYTE_24_VLAN_ID_S, 0);
4129 if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
4130 dev_err(hr_dev->dev,
4131 "sgid_index(%u) too large. max is %d\n",
4133 hr_dev->caps.gid_table_len[hr_port]);
4138 if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
4139 dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n");
4144 roce_set_field(context->byte_52_udpspn_dmac,
4145 V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S,
4146 (gid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) ?
4149 roce_set_field(qpc_mask->byte_52_udpspn_dmac,
4150 V2_QPC_BYTE_52_UDPSPN_M,
4151 V2_QPC_BYTE_52_UDPSPN_S, 0);
4153 roce_set_field(context->byte_20_smac_sgid_idx,
4154 V2_QPC_BYTE_20_SGID_IDX_M,
4155 V2_QPC_BYTE_20_SGID_IDX_S, grh->sgid_index);
4157 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4158 V2_QPC_BYTE_20_SGID_IDX_M,
4159 V2_QPC_BYTE_20_SGID_IDX_S, 0);
4161 roce_set_field(context->byte_24_mtu_tc,
4162 V2_QPC_BYTE_24_HOP_LIMIT_M,
4163 V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
4164 roce_set_field(qpc_mask->byte_24_mtu_tc,
4165 V2_QPC_BYTE_24_HOP_LIMIT_M,
4166 V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
4168 if (hr_dev->pci_dev->revision == 0x21 &&
4169 gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
4170 roce_set_field(context->byte_24_mtu_tc,
4171 V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
4172 grh->traffic_class >> 2);
4174 roce_set_field(context->byte_24_mtu_tc,
4175 V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
4176 grh->traffic_class);
4177 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4178 V2_QPC_BYTE_24_TC_S, 0);
4179 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4180 V2_QPC_BYTE_28_FL_S, grh->flow_label);
4181 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4182 V2_QPC_BYTE_28_FL_S, 0);
4183 memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4184 memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
4185 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4186 V2_QPC_BYTE_28_SL_S,
4187 rdma_ah_get_sl(&attr->ah_attr));
4188 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4189 V2_QPC_BYTE_28_SL_S, 0);
4190 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
4193 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
4194 set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
4196 roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
4198 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
4199 V2_QPC_BYTE_108_INV_CREDIT_S, 0);
4201 /* Every state transition must update the QP state field */
4202 roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4203 V2_QPC_BYTE_60_QP_ST_S, new_state);
4204 roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4205 V2_QPC_BYTE_60_QP_ST_S, 0);
4207 /* SW passes the context to HW */
4208 ret = hns_roce_v2_qp_modify(hr_dev, &hr_qp->mtt, cur_state, new_state,
4211 dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
4215 hr_qp->state = new_state;
4217 if (attr_mask & IB_QP_ACCESS_FLAGS)
4218 hr_qp->atomic_rd_en = attr->qp_access_flags;
4220 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
4221 hr_qp->resp_depth = attr->max_dest_rd_atomic;
4222 if (attr_mask & IB_QP_PORT) {
4223 hr_qp->port = attr->port_num - 1;
4224 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
4227 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
4228 hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
4229 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
4230 if (ibqp->send_cq != ibqp->recv_cq)
4231 hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
4238 hr_qp->sq_next_wqe = 0;
4239 hr_qp->next_sge = 0;
4240 if (hr_qp->rq.wqe_cnt)
4241 *hr_qp->rdb.db_record = 0;
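/* Map the hardware QP state encoding to the corresponding IB verbs state. */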
4249 static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
4252 case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
4253 case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
4254 case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
4255 case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
4256 case HNS_ROCE_QP_ST_SQ_DRAINING:
4257 case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
4258 case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
4259 case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
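/* Fetch the raw QP context from hardware with a QUERY_QPC mailbox command. */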
4264 static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
4265 struct hns_roce_qp *hr_qp,
4266 struct hns_roce_v2_qp_context *hr_context)
4268 struct hns_roce_cmd_mailbox *mailbox;
4271 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4272 if (IS_ERR(mailbox))
4273 return PTR_ERR(mailbox);
4275 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
4276 HNS_ROCE_CMD_QUERY_QPC,
4277 HNS_ROCE_CMD_TIMEOUT_MSECS);
4279 dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
4283 memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
4286 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4290 static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
4292 struct ib_qp_init_attr *qp_init_attr)
4294 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4295 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4296 struct hns_roce_v2_qp_context *context;
4297 struct device *dev = hr_dev->dev;
4302 context = kzalloc(sizeof(*context), GFP_KERNEL);
4306 memset(qp_attr, 0, sizeof(*qp_attr));
4307 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
4309 mutex_lock(&hr_qp->mutex);
4311 if (hr_qp->state == IB_QPS_RESET) {
4312 qp_attr->qp_state = IB_QPS_RESET;
4317 ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context);
4319 dev_err(dev, "query qpc error\n");
4324 state = roce_get_field(context->byte_60_qpst_tempid,
4325 V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
4326 tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
4327 if (tmp_qp_state == -1) {
4328 dev_err(dev, "Illegal ib_qp_state\n");
4332 hr_qp->state = (u8)tmp_qp_state;
4333 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
4334 qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->byte_24_mtu_tc,
4335 V2_QPC_BYTE_24_MTU_M,
4336 V2_QPC_BYTE_24_MTU_S);
4337 qp_attr->path_mig_state = IB_MIG_ARMED;
4338 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
4339 if (hr_qp->ibqp.qp_type == IB_QPT_UD)
4340 qp_attr->qkey = V2_QKEY_VAL;
4342 qp_attr->rq_psn = roce_get_field(context->byte_108_rx_reqepsn,
4343 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4344 V2_QPC_BYTE_108_RX_REQ_EPSN_S);
4345 qp_attr->sq_psn = (u32)roce_get_field(context->byte_172_sq_psn,
4346 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4347 V2_QPC_BYTE_172_SQ_CUR_PSN_S);
4348 qp_attr->dest_qp_num = (u8)roce_get_field(context->byte_56_dqpn_err,
4349 V2_QPC_BYTE_56_DQPN_M,
4350 V2_QPC_BYTE_56_DQPN_S);
4351 qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en,
4352 V2_QPC_BYTE_76_RRE_S)) << 2) |
4353 ((roce_get_bit(context->byte_76_srqn_op_en,
4354 V2_QPC_BYTE_76_RWE_S)) << 1) |
4355 ((roce_get_bit(context->byte_76_srqn_op_en,
4356 V2_QPC_BYTE_76_ATE_S)) << 3);
4357 if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
4358 hr_qp->ibqp.qp_type == IB_QPT_UC) {
4359 struct ib_global_route *grh =
4360 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
4362 rdma_ah_set_sl(&qp_attr->ah_attr,
4363 roce_get_field(context->byte_28_at_fl,
4364 V2_QPC_BYTE_28_SL_M,
4365 V2_QPC_BYTE_28_SL_S));
4366 grh->flow_label = roce_get_field(context->byte_28_at_fl,
4367 V2_QPC_BYTE_28_FL_M,
4368 V2_QPC_BYTE_28_FL_S);
4369 grh->sgid_index = roce_get_field(context->byte_20_smac_sgid_idx,
4370 V2_QPC_BYTE_20_SGID_IDX_M,
4371 V2_QPC_BYTE_20_SGID_IDX_S);
4372 grh->hop_limit = roce_get_field(context->byte_24_mtu_tc,
4373 V2_QPC_BYTE_24_HOP_LIMIT_M,
4374 V2_QPC_BYTE_24_HOP_LIMIT_S);
4375 grh->traffic_class = roce_get_field(context->byte_24_mtu_tc,
4376 V2_QPC_BYTE_24_TC_M,
4377 V2_QPC_BYTE_24_TC_S);
4379 memcpy(grh->dgid.raw, context->dgid, sizeof(grh->dgid.raw));
4382 qp_attr->port_num = hr_qp->port + 1;
4383 qp_attr->sq_draining = 0;
4384 qp_attr->max_rd_atomic = 1 << roce_get_field(context->byte_208_irrl,
4385 V2_QPC_BYTE_208_SR_MAX_M,
4386 V2_QPC_BYTE_208_SR_MAX_S);
4387 qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->byte_140_raq,
4388 V2_QPC_BYTE_140_RR_MAX_M,
4389 V2_QPC_BYTE_140_RR_MAX_S);
4390 qp_attr->min_rnr_timer = (u8)roce_get_field(context->byte_80_rnr_rx_cqn,
4391 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4392 V2_QPC_BYTE_80_MIN_RNR_TIME_S);
4393 qp_attr->timeout = (u8)roce_get_field(context->byte_28_at_fl,
4394 V2_QPC_BYTE_28_AT_M,
4395 V2_QPC_BYTE_28_AT_S);
4396 qp_attr->retry_cnt = roce_get_field(context->byte_212_lsn,
4397 V2_QPC_BYTE_212_RETRY_CNT_M,
4398 V2_QPC_BYTE_212_RETRY_CNT_S);
4399 qp_attr->rnr_retry = context->rq_rnr_timer;
4402 qp_attr->cur_qp_state = qp_attr->qp_state;
4403 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
4404 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
4406 if (!ibqp->uobject) {
4407 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
4408 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
4410 qp_attr->cap.max_send_wr = 0;
4411 qp_attr->cap.max_send_sge = 0;
4414 qp_init_attr->cap = qp_attr->cap;
4417 mutex_unlock(&hr_qp->mutex);
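/*
 * Common teardown path: move an RC QP back to RESET if needed, clean the
 * attached CQs under the CQ locks, remove the QP from the tables and
 * release its QPN, buffers and doorbells.
 */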
4422 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
4423 struct hns_roce_qp *hr_qp,
4426 struct hns_roce_cq *send_cq, *recv_cq;
4427 struct device *dev = hr_dev->dev;
4430 if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
4431 /* Modify qp to reset before destroying qp */
4432 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
4433 hr_qp->state, IB_QPS_RESET);
4435 dev_err(dev, "modify QP %06lx to Reset failed.\n",
4441 send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
4442 recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
4444 hns_roce_lock_cqs(send_cq, recv_cq);
4447 __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
4448 to_hr_srq(hr_qp->ibqp.srq) : NULL);
4449 if (send_cq != recv_cq)
4450 __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
4453 hns_roce_qp_remove(hr_dev, hr_qp);
4455 hns_roce_unlock_cqs(send_cq, recv_cq);
4457 hns_roce_qp_free(hr_dev, hr_qp);
4459 /* Not a special QP, free its QPN */
4460 if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
4461 (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
4462 (hr_qp->ibqp.qp_type == IB_QPT_UD))
4463 hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
4465 hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
4468 if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
4469 hns_roce_db_unmap_user(
4470 to_hr_ucontext(hr_qp->ibqp.uobject->context),
4473 if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
4474 hns_roce_db_unmap_user(
4475 to_hr_ucontext(hr_qp->ibqp.uobject->context),
4477 ib_umem_release(hr_qp->umem);
4479 kfree(hr_qp->sq.wrid);
4480 kfree(hr_qp->rq.wrid);
4481 hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
4482 if (hr_qp->rq.wqe_cnt)
4483 hns_roce_free_db(hr_dev, &hr_qp->rdb);
4486 if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
4487 hr_qp->rq.wqe_cnt) {
4488 kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
4489 kfree(hr_qp->rq_inl_buf.wqe_list);
4495 static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
4497 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4498 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4501 ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, ibqp->uobject);
4503 dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret);
4507 if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
4508 kfree(hr_to_hr_sqp(hr_qp));
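/*
 * Initialize the per-QP flow control (SCC) context: reset the "clear done"
 * flag, clear the SCC context for this QPN, then poll until hardware
 * reports the clear as done.
 */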
4515 static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
4516 struct hns_roce_qp *hr_qp)
4518 struct hns_roce_sccc_clr_done *resp;
4519 struct hns_roce_sccc_clr *clr;
4520 struct hns_roce_cmq_desc desc;
4523 mutex_lock(&hr_dev->qp_table.scc_mutex);
4525 /* set scc ctx clear done flag */
4526 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
4527 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
4529 dev_err(hr_dev->dev, "Reset SCC ctx failed(%d)\n", ret);
4533 /* clear scc context */
4534 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
4535 clr = (struct hns_roce_sccc_clr *)desc.data;
4536 clr->qpn = cpu_to_le32(hr_qp->qpn);
4537 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
4539 dev_err(hr_dev->dev, "Clear SCC ctx failed(%d)\n", ret);
4543 /* query whether the scc context clear is done */
4544 resp = (struct hns_roce_sccc_clr_done *)desc.data;
4545 for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
4546 hns_roce_cmq_setup_basic_desc(&desc,
4547 HNS_ROCE_OPC_QUERY_SCCC, true);
4548 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
4550 dev_err(hr_dev->dev, "Query clr cmq failed(%d)\n", ret);
4560 dev_err(hr_dev->dev, "Query SCC clr done flag timed out.\n");
4564 mutex_unlock(&hr_dev->qp_table.scc_mutex);
4568 static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
4570 struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
4571 struct hns_roce_v2_cq_context *cq_context;
4572 struct hns_roce_cq *hr_cq = to_hr_cq(cq);
4573 struct hns_roce_v2_cq_context *cqc_mask;
4574 struct hns_roce_cmd_mailbox *mailbox;
4577 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4578 if (IS_ERR(mailbox))
4579 return PTR_ERR(mailbox);
4581 cq_context = mailbox->buf;
4582 cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
4584 memset(cqc_mask, 0xff, sizeof(*cqc_mask));
4586 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
4587 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
4589 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
4590 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
4592 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
4593 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
4595 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
4596 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
4599 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
4600 HNS_ROCE_CMD_MODIFY_CQC,
4601 HNS_ROCE_CMD_TIMEOUT_MSECS);
4602 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4604 dev_err(hr_dev->dev, "MODIFY CQ failed to send the cmd mailbox.\n");
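/* Move a QP that raised an asynchronous error event into the ERR state. */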
4609 static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn)
4611 struct hns_roce_qp *hr_qp;
4612 struct ib_qp_attr attr;
4616 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
4618 dev_warn(hr_dev->dev, "no hr_qp can be found!\n");
4622 if (hr_qp->ibqp.uobject) {
4623 if (hr_qp->sdb_en == 1) {
4624 hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
4625 if (hr_qp->rdb_en == 1)
4626 hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
4628 dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
4633 attr_mask = IB_QP_STATE;
4634 attr.qp_state = IB_QPS_ERR;
4635 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr, attr_mask,
4636 hr_qp->state, IB_QPS_ERR);
4638 dev_err(hr_dev->dev, "failed to modify qp %d to err state.\n",
4642 static void hns_roce_irq_work_handle(struct work_struct *work)
4644 struct hns_roce_work *irq_work =
4645 container_of(work, struct hns_roce_work, work);
4646 struct device *dev = irq_work->hr_dev->dev;
4647 u32 qpn = irq_work->qpn;
4648 u32 cqn = irq_work->cqn;
4650 switch (irq_work->event_type) {
4651 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
4652 dev_info(dev, "Path migration succeeded.\n");
4654 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
4655 dev_warn(dev, "Path migration failed.\n");
4657 case HNS_ROCE_EVENT_TYPE_COMM_EST:
4658 dev_info(dev, "Communication established.\n");
4660 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
4661 dev_warn(dev, "Send queue drained.\n");
4663 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
4664 dev_err(dev, "Local work queue 0x%x catas error, sub_type:%d\n",
4665 qpn, irq_work->sub_type);
4666 hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4668 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
4669 dev_err(dev, "Invalid request local work queue 0x%x error.\n",
4671 hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4673 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
4674 dev_err(dev, "Local access violation work queue 0x%x error, sub_type:%d\n",
4675 qpn, irq_work->sub_type);
4676 hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4678 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
4679 dev_warn(dev, "SRQ limit reached.\n");
4681 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
4682 dev_warn(dev, "SRQ last wqe reached.\n");
4684 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
4685 dev_err(dev, "SRQ catas error.\n");
4687 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
4688 dev_err(dev, "CQ 0x%x access err.\n", cqn);
4690 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
4691 dev_warn(dev, "CQ 0x%x overflow\n", cqn);
4693 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
4694 dev_warn(dev, "DB overflow.\n");
4696 case HNS_ROCE_EVENT_TYPE_FLR:
4697 dev_warn(dev, "Function level reset.\n");
4706 static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
4707 struct hns_roce_eq *eq,
4710 struct hns_roce_work *irq_work;
4712 irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
4716 INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
4717 irq_work->hr_dev = hr_dev;
4718 irq_work->qpn = qpn;
4719 irq_work->cqn = cqn;
4720 irq_work->event_type = eq->event_type;
4721 irq_work->sub_type = eq->sub_type;
4722 queue_work(hr_dev->irq_workq, &(irq_work->work));
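/*
 * Ring the EQ doorbell: the first word carries the arm command and the EQ
 * number, the second word the current consumer index.
 */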
4725 static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
4727 struct hns_roce_dev *hr_dev = eq->hr_dev;
4733 if (eq->type_flag == HNS_ROCE_AEQ) {
4734 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4735 HNS_ROCE_V2_EQ_DB_CMD_S,
4736 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4737 HNS_ROCE_EQ_DB_CMD_AEQ :
4738 HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
4740 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
4741 HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
4743 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4744 HNS_ROCE_V2_EQ_DB_CMD_S,
4745 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4746 HNS_ROCE_EQ_DB_CMD_CEQ :
4747 HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
4750 roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
4751 HNS_ROCE_V2_EQ_DB_PARA_S,
4752 (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
4754 hns_roce_write64(hr_dev, doorbell, eq->doorbell);
4757 static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
4762 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4763 off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
4765 return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
4769 static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
4774 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4776 off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
4778 if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
4779 return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
4782 return (struct hns_roce_aeqe *)((u8 *)
4783 (eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
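/*
 * An AEQE is valid when its owner bit differs from the phase implied by
 * the consumer index wrapping over eq->entries.
 */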
4786 static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
4788 struct hns_roce_aeqe *aeqe;
4791 aeqe = get_aeqe_v2(eq, eq->cons_index);
4793 aeqe = mhop_get_aeqe(eq, eq->cons_index);
4795 return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
4796 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
4799 static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
4800 struct hns_roce_eq *eq)
4802 struct device *dev = hr_dev->dev;
4803 struct hns_roce_aeqe *aeqe;
4811 while ((aeqe = next_aeqe_sw_v2(eq))) {
4813 /* Make sure we read AEQ entry after we have checked the
4818 event_type = roce_get_field(aeqe->asyn,
4819 HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
4820 HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
4821 sub_type = roce_get_field(aeqe->asyn,
4822 HNS_ROCE_V2_AEQE_SUB_TYPE_M,
4823 HNS_ROCE_V2_AEQE_SUB_TYPE_S);
4824 qpn = roce_get_field(aeqe->event.qp_event.qp,
4825 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
4826 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
4827 cqn = roce_get_field(aeqe->event.cq_event.cq,
4828 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
4829 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
4830 srqn = roce_get_field(aeqe->event.srq_event.srq,
4831 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
4832 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
4834 switch (event_type) {
4835 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
4836 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
4837 case HNS_ROCE_EVENT_TYPE_COMM_EST:
4838 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
4839 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
4840 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
4841 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
4842 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
4843 hns_roce_qp_event(hr_dev, qpn, event_type);
4845 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
4846 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
4847 hns_roce_srq_event(hr_dev, srqn, event_type);
4849 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
4850 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
4851 hns_roce_cq_event(hr_dev, cqn, event_type);
4853 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
4855 case HNS_ROCE_EVENT_TYPE_MB:
4856 hns_roce_cmd_event(hr_dev,
4857 le16_to_cpu(aeqe->event.cmd.token),
4858 aeqe->event.cmd.status,
4859 le64_to_cpu(aeqe->event.cmd.out_param));
4861 case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
4863 case HNS_ROCE_EVENT_TYPE_FLR:
4866 dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
4867 event_type, eq->eqn, eq->cons_index);
4871 eq->event_type = event_type;
4872 eq->sub_type = sub_type;
4876 if (eq->cons_index > (2 * eq->entries - 1)) {
4877 dev_warn(dev, "cons_index overflow, set back to 0.\n");
4880 hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
4883 set_eq_cons_index_v2(eq);
4887 static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
4892 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4893 off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
4895 return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
4899 static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
4904 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4906 off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
4908 if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
4909 return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
4912 return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
4913 buf_chk_sz]) + off % buf_chk_sz);
4916 static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
4918 struct hns_roce_ceqe *ceqe;
4921 ceqe = get_ceqe_v2(eq, eq->cons_index);
4923 ceqe = mhop_get_ceqe(eq, eq->cons_index);
4925 return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
4926 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
4929 static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
4930 struct hns_roce_eq *eq)
4932 struct device *dev = hr_dev->dev;
4933 struct hns_roce_ceqe *ceqe;
4937 while ((ceqe = next_ceqe_sw_v2(eq))) {
4939 /* Make sure we read CEQ entry after we have checked the
4944 cqn = roce_get_field(ceqe->comp,
4945 HNS_ROCE_V2_CEQE_COMP_CQN_M,
4946 HNS_ROCE_V2_CEQE_COMP_CQN_S);
4948 hns_roce_cq_completion(hr_dev, cqn);
4953 if (eq->cons_index > (2 * eq->entries - 1)) {
4954 dev_warn(dev, "cons_index overflow, set back to 0.\n");
4959 set_eq_cons_index_v2(eq);
4964 static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
4966 struct hns_roce_eq *eq = eq_ptr;
4967 struct hns_roce_dev *hr_dev = eq->hr_dev;
4970 if (eq->type_flag == HNS_ROCE_CEQ)
4971 /* Completion event interrupt */
4972 int_work = hns_roce_v2_ceq_int(hr_dev, eq);
4974 /* Asynchronous event interrupt */
4975 int_work = hns_roce_v2_aeq_int(hr_dev, eq);
4977 return IRQ_RETVAL(int_work);
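/*
 * Abnormal interrupt handler: an AEQ overflow is recovered by requesting a
 * function level reset through the hnae3 ops; bus and other errors are only
 * acknowledged and the abnormal interrupt is re-enabled.
 */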
4980 static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
4982 struct hns_roce_dev *hr_dev = dev_id;
4983 struct device *dev = hr_dev->dev;
4988 /* Abnormal interrupt */
4989 int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
4990 int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
4992 if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
4993 struct pci_dev *pdev = hr_dev->pci_dev;
4994 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4995 const struct hnae3_ae_ops *ops = ae_dev->ops;
4997 dev_err(dev, "AEQ overflow!\n");
4999 roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
5000 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5002 /* Set reset level for reset_event() */
5003 if (ops->set_default_reset_request)
5004 ops->set_default_reset_request(ae_dev,
5006 if (ops->reset_event)
5007 ops->reset_event(pdev, NULL);
5009 roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
5010 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5013 } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
5014 dev_err(dev, "BUS ERR!\n");
5016 roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S, 1);
5017 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5019 roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
5020 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5023 } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
5024 dev_err(dev, "OTHER ERR!\n");
5026 roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S, 1);
5027 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5029 roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
5030 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5034 dev_err(dev, "There is no abnormal irq found!\n");
5036 return IRQ_RETVAL(int_work);
5039 static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
5040 int eq_num, int enable_flag)
5044 if (enable_flag == EQ_ENABLE) {
5045 for (i = 0; i < eq_num; i++)
5046 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5048 HNS_ROCE_V2_VF_EVENT_INT_EN_M);
5050 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5051 HNS_ROCE_V2_VF_ABN_INT_EN_M);
5052 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5053 HNS_ROCE_V2_VF_ABN_INT_CFG_M);
5055 for (i = 0; i < eq_num; i++)
5056 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5058 HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
5060 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5061 HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
5062 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5063 HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
5067 static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
5069 struct device *dev = hr_dev->dev;
5072 if (eqn < hr_dev->caps.num_comp_vectors)
5073 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5074 0, HNS_ROCE_CMD_DESTROY_CEQC,
5075 HNS_ROCE_CMD_TIMEOUT_MSECS);
5077 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5078 0, HNS_ROCE_CMD_DESTROY_AEQC,
5079 HNS_ROCE_CMD_TIMEOUT_MSECS);
5081 dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
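/*
 * Free a multi-hop EQ: the single buffer for hop 0, or the L0/L1 base
 * address tables plus the per-chunk EQE buffers for hop 1 and hop 2.
 */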
5084 static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
5085 struct hns_roce_eq *eq)
5087 struct device *dev = hr_dev->dev;
5097 mhop_num = hr_dev->caps.eqe_hop_num;
5098 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
5099 bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
5102 if (mhop_num == HNS_ROCE_HOP_NUM_0) {
5103 dma_free_coherent(dev, (unsigned int)(eq->entries *
5104 eq->eqe_size), eq->bt_l0, eq->l0_dma);
5108 /* hop_num = 1 or hop_num = 2 */
5109 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
5110 if (mhop_num == 1) {
5111 for (i = 0; i < eq->l0_last_num; i++) {
5112 if (i == eq->l0_last_num - 1) {
5113 eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
5114 size = (eq->entries - eqe_alloc) * eq->eqe_size;
5115 dma_free_coherent(dev, size, eq->buf[i],
5119 dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
5122 } else if (mhop_num == 2) {
5123 for (i = 0; i < eq->l0_last_num; i++) {
5124 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
5127 for (j = 0; j < bt_chk_sz / 8; j++) {
5128 idx = i * (bt_chk_sz / 8) + j;
5129 if ((i == eq->l0_last_num - 1)
5130 && j == eq->l1_last_num - 1) {
5131 eqe_alloc = (buf_chk_sz / eq->eqe_size)
5133 size = (eq->entries - eqe_alloc)
5135 dma_free_coherent(dev, size,
5140 dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
5155 static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
5156 struct hns_roce_eq *eq)
5160 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5162 if (hr_dev->caps.eqe_hop_num) {
5163 hns_roce_mhop_free_eq(hr_dev, eq);
5168 dma_free_coherent(hr_dev->dev, buf_chk_sz,
5169 eq->buf_list->buf, eq->buf_list->map);
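/*
 * Build the EQ context: state, hop number, coalescing parameters, page
 * sizes and the split base-address fields expected by hardware.
 */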
5172 static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
5173 struct hns_roce_eq *eq,
5176 struct hns_roce_eq_context *eqc;
5179 memset(eqc, 0, sizeof(struct hns_roce_eq_context));
5182 eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
5183 eq->hop_num = hr_dev->caps.eqe_hop_num;
5185 eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
5186 eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
5187 eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
5188 eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
5189 eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
5190 eq->shift = ilog2((unsigned int)eq->entries);
5193 eq->eqe_ba = eq->buf_list->map;
5195 eq->eqe_ba = eq->l0_dma;
5198 roce_set_field(eqc->byte_4,
5199 HNS_ROCE_EQC_EQ_ST_M,
5200 HNS_ROCE_EQC_EQ_ST_S,
5201 HNS_ROCE_V2_EQ_STATE_VALID);
5203 /* set eqe hop num */
5204 roce_set_field(eqc->byte_4,
5205 HNS_ROCE_EQC_HOP_NUM_M,
5206 HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
5208 /* set eqc over_ignore */
5209 roce_set_field(eqc->byte_4,
5210 HNS_ROCE_EQC_OVER_IGNORE_M,
5211 HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
5213 /* set eqc coalesce */
5214 roce_set_field(eqc->byte_4,
5215 HNS_ROCE_EQC_COALESCE_M,
5216 HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
5218 /* set eqc arm_state */
5219 roce_set_field(eqc->byte_4,
5220 HNS_ROCE_EQC_ARM_ST_M,
5221 HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
5224 roce_set_field(eqc->byte_4,
5226 HNS_ROCE_EQC_EQN_S, eq->eqn);
5229 roce_set_field(eqc->byte_4,
5230 HNS_ROCE_EQC_EQE_CNT_M,
5231 HNS_ROCE_EQC_EQE_CNT_S,
5232 HNS_ROCE_EQ_INIT_EQE_CNT);
5234 /* set eqe_ba_pg_sz */
5235 roce_set_field(eqc->byte_8,
5236 HNS_ROCE_EQC_BA_PG_SZ_M,
5237 HNS_ROCE_EQC_BA_PG_SZ_S,
5238 eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);
5240 /* set eqe_buf_pg_sz */
5241 roce_set_field(eqc->byte_8,
5242 HNS_ROCE_EQC_BUF_PG_SZ_M,
5243 HNS_ROCE_EQC_BUF_PG_SZ_S,
5244 eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);
5246 /* set eq_producer_idx */
5247 roce_set_field(eqc->byte_8,
5248 HNS_ROCE_EQC_PROD_INDX_M,
5249 HNS_ROCE_EQC_PROD_INDX_S,
5250 HNS_ROCE_EQ_INIT_PROD_IDX);
5252 /* set eq_max_cnt */
5253 roce_set_field(eqc->byte_12,
5254 HNS_ROCE_EQC_MAX_CNT_M,
5255 HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
5258 roce_set_field(eqc->byte_12,
5259 HNS_ROCE_EQC_PERIOD_M,
5260 HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
5262 /* set eqe_report_timer */
5263 roce_set_field(eqc->eqe_report_timer,
5264 HNS_ROCE_EQC_REPORT_TIMER_M,
5265 HNS_ROCE_EQC_REPORT_TIMER_S,
5266 HNS_ROCE_EQ_INIT_REPORT_TIMER);
5268 /* set eqe_ba [34:3] */
5269 roce_set_field(eqc->eqe_ba0,
5270 HNS_ROCE_EQC_EQE_BA_L_M,
5271 HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);
5273 /* set eqe_ba [64:35] */
5274 roce_set_field(eqc->eqe_ba1,
5275 HNS_ROCE_EQC_EQE_BA_H_M,
5276 HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);
5279 roce_set_field(eqc->byte_28,
5280 HNS_ROCE_EQC_SHIFT_M,
5281 HNS_ROCE_EQC_SHIFT_S, eq->shift);
5283 /* set eq MSI_IDX */
5284 roce_set_field(eqc->byte_28,
5285 HNS_ROCE_EQC_MSI_INDX_M,
5286 HNS_ROCE_EQC_MSI_INDX_S,
5287 HNS_ROCE_EQ_INIT_MSI_IDX);
5289 /* set cur_eqe_ba [27:12] */
5290 roce_set_field(eqc->byte_28,
5291 HNS_ROCE_EQC_CUR_EQE_BA_L_M,
5292 HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);
5294 /* set cur_eqe_ba [59:28] */
5295 roce_set_field(eqc->byte_32,
5296 HNS_ROCE_EQC_CUR_EQE_BA_M_M,
5297 HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);
5299 /* set cur_eqe_ba [63:60] */
5300 roce_set_field(eqc->byte_36,
5301 HNS_ROCE_EQC_CUR_EQE_BA_H_M,
5302 HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);
5304 /* set eq consumer idx */
5305 roce_set_field(eqc->byte_36,
5306 HNS_ROCE_EQC_CONS_INDX_M,
5307 HNS_ROCE_EQC_CONS_INDX_S,
5308 HNS_ROCE_EQ_INIT_CONS_IDX);
5310 /* set nex_eqe_ba[43:12] */
5311 roce_set_field(eqc->nxt_eqe_ba0,
5312 HNS_ROCE_EQC_NXT_EQE_BA_L_M,
5313 HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);
5315 /* set nex_eqe_ba[63:44] */
5316 roce_set_field(eqc->nxt_eqe_ba1,
5317 HNS_ROCE_EQC_NXT_EQE_BA_H_M,
5318 HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
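/*
 * Allocate EQE buffers for a multi-hop EQ: one contiguous buffer for hop 0,
 * an L0 BT pointing at buffer chunks for hop 1, and an additional L1 BT
 * level for hop 2.
 */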
5321 static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
5322 struct hns_roce_eq *eq)
5324 struct device *dev = hr_dev->dev;
5325 int eq_alloc_done = 0;
5340 mhop_num = hr_dev->caps.eqe_hop_num;
5341 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
5342 bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
5344 ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1)
5346 bt_num = (ba_num + bt_chk_sz / 8 - 1) / (bt_chk_sz / 8);
5349 if (mhop_num == HNS_ROCE_HOP_NUM_0) {
5350 if (eq->entries > buf_chk_sz / eq->eqe_size) {
5351 dev_err(dev, "eq entries %d is larger than buf_pg_sz!\n",
5355 eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
5356 &(eq->l0_dma), GFP_KERNEL);
5360 eq->cur_eqe_ba = eq->l0_dma;
5363 memset(eq->bt_l0, 0, eq->entries * eq->eqe_size);
5368 eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
5371 eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
5373 goto err_kcalloc_buf;
5375 if (mhop_num == 2) {
5376 eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
5378 goto err_kcalloc_l1_dma;
5380 eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
5382 goto err_kcalloc_bt_l1;
5386 eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
5388 goto err_dma_alloc_l0;
5390 if (mhop_num == 1) {
5391 if (ba_num > (bt_chk_sz / 8))
5392 dev_err(dev, "ba_num %d is too large for 1 hop\n",
5396 for (i = 0; i < bt_chk_sz / 8; i++) {
5397 if (eq_buf_cnt + 1 < ba_num) {
5400 eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
5401 size = (eq->entries - eqe_alloc) * eq->eqe_size;
5403 eq->buf[i] = dma_alloc_coherent(dev, size,
5407 goto err_dma_alloc_buf;
5409 *(eq->bt_l0 + i) = eq->buf_dma[i];
5412 if (eq_buf_cnt >= ba_num)
5415 eq->cur_eqe_ba = eq->buf_dma[0];
5416 eq->nxt_eqe_ba = eq->buf_dma[1];
5418 } else if (mhop_num == 2) {
5419 /* alloc L1 BT and buf */
5420 for (i = 0; i < bt_chk_sz / 8; i++) {
5421 eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
5425 goto err_dma_alloc_l1;
5426 *(eq->bt_l0 + i) = eq->l1_dma[i];
5428 for (j = 0; j < bt_chk_sz / 8; j++) {
5429 idx = i * bt_chk_sz / 8 + j;
5430 if (eq_buf_cnt + 1 < ba_num) {
5433 eqe_alloc = (buf_chk_sz / eq->eqe_size)
5435 size = (eq->entries - eqe_alloc)
5438 eq->buf[idx] = dma_alloc_coherent(dev, size,
5439 &(eq->buf_dma[idx]),
5442 goto err_dma_alloc_buf;
5444 *(eq->bt_l1[i] + j) = eq->buf_dma[idx];
5447 if (eq_buf_cnt >= ba_num) {
5456 eq->cur_eqe_ba = eq->buf_dma[0];
5457 eq->nxt_eqe_ba = eq->buf_dma[1];
5460 eq->l0_last_num = i + 1;
5462 eq->l1_last_num = j + 1;
5467 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
5470 for (i -= 1; i >= 0; i--) {
5471 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
5474 for (j = 0; j < bt_chk_sz / 8; j++) {
5475 idx = i * bt_chk_sz / 8 + j;
5476 dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
5480 goto err_dma_alloc_l0;
5483 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
5488 for (i -= 1; i >= 0; i--)
5489 dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
5491 else if (mhop_num == 2) {
5494 for (; i >= 0; i--) {
5495 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
5498 for (j = 0; j < bt_chk_sz / 8; j++) {
5499 if (i == record_i && j >= record_j)
5502 idx = i * bt_chk_sz / 8 + j;
5503 dma_free_coherent(dev, buf_chk_sz,
5529 static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
5530 struct hns_roce_eq *eq,
5531 unsigned int eq_cmd)
5533 struct device *dev = hr_dev->dev;
5534 struct hns_roce_cmd_mailbox *mailbox;
5538 /* Allocate mailbox memory */
5539 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5540 if (IS_ERR(mailbox))
5541 return PTR_ERR(mailbox);
5543 if (!hr_dev->caps.eqe_hop_num) {
5544 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
5546 eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
5548 if (!eq->buf_list) {
5553 eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
5554 &(eq->buf_list->map),
5556 if (!eq->buf_list->buf) {
5562 ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
5569 hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
5571 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
5572 eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
5574 dev_err(dev, "[mailbox cmd] create eqc failed.\n");
5578 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5583 if (!hr_dev->caps.eqe_hop_num)
5584 dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
5587 hns_roce_mhop_free_eq(hr_dev, eq);
5592 kfree(eq->buf_list);
5595 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
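/*
 * Create all CEQs and AEQs, enable their interrupts, request the abnormal,
 * AEQ and CEQ irq lines and spawn the irq workqueue.
 */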
5600 static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
5602 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5603 struct device *dev = hr_dev->dev;
5604 struct hns_roce_eq *eq;
5605 unsigned int eq_cmd;
5614 other_num = hr_dev->caps.num_other_vectors;
5615 comp_num = hr_dev->caps.num_comp_vectors;
5616 aeq_num = hr_dev->caps.num_aeq_vectors;
5618 eq_num = comp_num + aeq_num;
5619 irq_num = eq_num + other_num;
5621 eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
5625 for (i = 0; i < irq_num; i++) {
5626 hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
5628 if (!hr_dev->irq_names[i]) {
5630 goto err_failed_kzalloc;
5635 for (j = 0; j < eq_num; j++) {
5636 eq = &eq_table->eq[j];
5637 eq->hr_dev = hr_dev;
5641 eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
5642 eq->type_flag = HNS_ROCE_CEQ;
5643 eq->entries = hr_dev->caps.ceqe_depth;
5644 eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
5645 eq->irq = hr_dev->irq[j + other_num + aeq_num];
5646 eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
5647 eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
5650 eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
5651 eq->type_flag = HNS_ROCE_AEQ;
5652 eq->entries = hr_dev->caps.aeqe_depth;
5653 eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
5654 eq->irq = hr_dev->irq[j - comp_num + other_num];
5655 eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
5656 eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
5659 ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
5661 dev_err(dev, "eq create failed.\n");
5662 goto err_create_eq_fail;
5667 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
5669 /* irq contains: abnormal + AEQ + CEQ */
5670 for (k = 0; k < irq_num; k++)
5672 snprintf((char *)hr_dev->irq_names[k],
5673 HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k);
5674 else if (k < (other_num + aeq_num))
5675 snprintf((char *)hr_dev->irq_names[k],
5676 HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
5679 snprintf((char *)hr_dev->irq_names[k],
5680 HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
5681 k - other_num - aeq_num);
5683 for (k = 0; k < irq_num; k++) {
5685 ret = request_irq(hr_dev->irq[k],
5686 hns_roce_v2_msix_interrupt_abn,
5687 0, hr_dev->irq_names[k], hr_dev);
5689 else if (k < (other_num + comp_num))
5690 ret = request_irq(eq_table->eq[k - other_num].irq,
5691 hns_roce_v2_msix_interrupt_eq,
5692 0, hr_dev->irq_names[k + aeq_num],
5693 &eq_table->eq[k - other_num]);
5695 ret = request_irq(eq_table->eq[k - other_num].irq,
5696 hns_roce_v2_msix_interrupt_eq,
5697 0, hr_dev->irq_names[k - comp_num],
5698 &eq_table->eq[k - other_num]);
5700 dev_err(dev, "Request irq error!\n");
5701 goto err_request_irq_fail;
5706 create_singlethread_workqueue("hns_roce_irq_workqueue");
5707 if (!hr_dev->irq_workq) {
5708 dev_err(dev, "Create irq workqueue failed!\n");
5710 goto err_request_irq_fail;
5715 err_request_irq_fail:
5716 for (k -= 1; k >= 0; k--)
5718 free_irq(hr_dev->irq[k], hr_dev);
5720 free_irq(eq_table->eq[k - other_num].irq,
5721 &eq_table->eq[k - other_num]);
5724 for (j -= 1; j >= 0; j--)
5725 hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]);
5728 for (i -= 1; i >= 0; i--)
5729 kfree(hr_dev->irq_names[i]);
5730 kfree(eq_table->eq);
5735 static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
5737 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5742 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
5743 irq_num = eq_num + hr_dev->caps.num_other_vectors;
5746 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
5748 for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
5749 free_irq(hr_dev->irq[i], hr_dev);
5751 for (i = 0; i < eq_num; i++) {
5752 hns_roce_v2_destroy_eqc(hr_dev, i);
5754 free_irq(eq_table->eq[i].irq, &eq_table->eq[i]);
5756 hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
5759 for (i = 0; i < irq_num; i++)
5760 kfree(hr_dev->irq_names[i]);
5762 kfree(eq_table->eq);
5764 flush_workqueue(hr_dev->irq_workq);
5765 destroy_workqueue(hr_dev->irq_workq);
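/*
 * Fill the SRQ context: state, hop numbers, WQE and index-queue base
 * addresses from the MTT, page sizes, PD and the associated CQ.
 */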
5768 static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
5769 struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
5770 u32 cqn, void *mb_buf, u64 *mtts_wqe,
5771 u64 *mtts_idx, dma_addr_t dma_handle_wqe,
5772 dma_addr_t dma_handle_idx)
5774 struct hns_roce_srq_context *srq_context;
5776 srq_context = mb_buf;
5777 memset(srq_context, 0, sizeof(*srq_context));
5779 roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
5780 SRQC_BYTE_4_SRQ_ST_S, 1);
5782 roce_set_field(srq_context->byte_4_srqn_srqst,
5783 SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
5784 SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
5785 (hr_dev->caps.srqwqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
5786 hr_dev->caps.srqwqe_hop_num));
5787 roce_set_field(srq_context->byte_4_srqn_srqst,
5788 SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
5791 roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
5792 SRQC_BYTE_4_SRQN_S, srq->srqn);
5794 roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5795 SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
5797 roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
5798 SRQC_BYTE_12_SRQ_XRCD_S, xrcd);
5800 srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));
5802 roce_set_field(srq_context->byte_24_wqe_bt_ba,
5803 SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
5804 SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
5805 cpu_to_le32(dma_handle_wqe >> 35));
5807 roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
5808 SRQC_BYTE_28_PD_S, pdn);
5809 roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
5810 SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
5811 fls(srq->max_gs - 1));
5813 srq_context->idx_bt_ba = (u32)(dma_handle_idx >> 3);
5814 srq_context->idx_bt_ba = cpu_to_le32(srq_context->idx_bt_ba);
5815 roce_set_field(srq_context->rsv_idx_bt_ba,
5816 SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
5817 SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
5818 cpu_to_le32(dma_handle_idx >> 35));
5820 srq_context->idx_cur_blk_addr = (u32)(mtts_idx[0] >> PAGE_ADDR_SHIFT);
5821 srq_context->idx_cur_blk_addr =
5822 cpu_to_le32(srq_context->idx_cur_blk_addr);
5823 roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5824 SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
5825 SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
5826 cpu_to_le32((mtts_idx[0]) >> (32 + PAGE_ADDR_SHIFT)));
5827 roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5828 SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
5829 SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
5830 hr_dev->caps.idx_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
5831 hr_dev->caps.idx_hop_num);
5833 roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5834 SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
5835 SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
5836 hr_dev->caps.idx_ba_pg_sz);
5837 roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5838 SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
5839 SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
5840 hr_dev->caps.idx_buf_pg_sz);
5842 srq_context->idx_nxt_blk_addr = (u32)(mtts_idx[1] >> PAGE_ADDR_SHIFT);
5843 srq_context->idx_nxt_blk_addr =
5844 cpu_to_le32(srq_context->idx_nxt_blk_addr);
5845 roce_set_field(srq_context->rsv_idxnxtblkaddr,
5846 SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
5847 SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
5848 cpu_to_le32((mtts_idx[1]) >> (32 + PAGE_ADDR_SHIFT)));
5849 roce_set_field(srq_context->byte_56_xrc_cqn,
5850 SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
5852 roce_set_field(srq_context->byte_56_xrc_cqn,
5853 SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
5854 SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
5855 hr_dev->caps.srqwqe_ba_pg_sz + PG_SHIFT_OFFSET);
5856 roce_set_field(srq_context->byte_56_xrc_cqn,
5857 SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
5858 SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
5859 hr_dev->caps.srqwqe_buf_pg_sz + PG_SHIFT_OFFSET);
5861 roce_set_bit(srq_context->db_record_addr_record_en,
5862 SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
5865 static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
5866 struct ib_srq_attr *srq_attr,
5867 enum ib_srq_attr_mask srq_attr_mask,
5868 struct ib_udata *udata)
5870 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5871 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5872 struct hns_roce_srq_context *srq_context;
5873 struct hns_roce_srq_context *srqc_mask;
5874 struct hns_roce_cmd_mailbox *mailbox;
5877 if (srq_attr_mask & IB_SRQ_LIMIT) {
5878 if (srq_attr->srq_limit >= srq->max)
5881 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5882 if (IS_ERR(mailbox))
5883 return PTR_ERR(mailbox);
5885 srq_context = mailbox->buf;
5886 srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
5888 memset(srqc_mask, 0xff, sizeof(*srqc_mask));
5890 roce_set_field(srq_context->byte_8_limit_wl,
5891 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5892 SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
5893 roce_set_field(srqc_mask->byte_8_limit_wl,
5894 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5895 SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
5897 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
5898 HNS_ROCE_CMD_MODIFY_SRQC,
5899 HNS_ROCE_CMD_TIMEOUT_MSECS);
5900 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5902 dev_err(hr_dev->dev,
5903 "MODIFY SRQ Failed to cmd mailbox.\n");
5911 static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
5913 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5914 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5915 struct hns_roce_srq_context *srq_context;
5916 struct hns_roce_cmd_mailbox *mailbox;
5920 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5921 if (IS_ERR(mailbox))
5922 return PTR_ERR(mailbox);
5924 srq_context = mailbox->buf;
5925 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
5926 HNS_ROCE_CMD_QUERY_SRQC,
5927 HNS_ROCE_CMD_TIMEOUT_MSECS);
5929 dev_err(hr_dev->dev, "QUERY SRQ cmd process error\n");
5933 limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
5934 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5935 SRQC_BYTE_8_SRQ_LIMIT_WL_S);
5937 attr->srq_limit = limit_wl;
5938 attr->max_wr = srq->max - 1;
5939 attr->max_sge = srq->max_gs;
5941 memcpy(srq_context, mailbox->buf, sizeof(*srq_context));
5944 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
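/*
 * Pick a free index-queue entry: find the first set bit in the allocation
 * bitmap, clear it and return its position.
 */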
5948 static int find_empty_entry(struct hns_roce_idx_que *idx_que)
5953 /* bitmap[i] is set zero if all bits are allocated */
5954 for (i = 0; idx_que->bitmap[i] == 0; ++i)
5956 bit_num = ffs(idx_que->bitmap[i]);
5957 idx_que->bitmap[i] &= ~(1ULL << (bit_num - 1));
5959 return i * sizeof(u64) * 8 + (bit_num - 1);
5962 static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
5963 int cur_idx, int wqe_idx)
5967 addr = (unsigned int *)hns_roce_buf_offset(&idx_que->idx_buf,
5968 cur_idx * idx_que->entry_sz);
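/*
 * Post receive WRs to an SRQ: reserve an index-queue entry, write the
 * scatter list into the SRQ WQE, then ring the SRQ doorbell with the
 * updated head.
 */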
5972 static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
5973 const struct ib_recv_wr *wr,
5974 const struct ib_recv_wr **bad_wr)
5976 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5977 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5978 struct hns_roce_v2_wqe_data_seg *dseg;
5979 struct hns_roce_v2_db srq_db;
5980 unsigned long flags;
5988 spin_lock_irqsave(&srq->lock, flags);
5990 ind = srq->head & (srq->max - 1);
5992 for (nreq = 0; wr; ++nreq, wr = wr->next) {
5993 if (unlikely(wr->num_sge > srq->max_gs)) {
5999 if (unlikely(srq->head == srq->tail)) {
6005 wqe_idx = find_empty_entry(&srq->idx_que);
6006 fill_idx_queue(&srq->idx_que, ind, wqe_idx);
6007 wqe = get_srq_wqe(srq, wqe_idx);
6008 dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
6010 for (i = 0; i < wr->num_sge; ++i) {
6011 dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
6012 dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
6013 dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
6016 if (i < srq->max_gs) {
6018 dseg->lkey = cpu_to_le32(0x100);
6022 srq->wrid[wqe_idx] = wr->wr_id;
6023 ind = (ind + 1) & (srq->max - 1);
6030 * Make sure that descriptors are written before
6035 srq_db.byte_4 = HNS_ROCE_V2_SRQ_DB << 24 | srq->srqn;
6036 srq_db.parameter = srq->head;
6038 hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
6042 spin_unlock_irqrestore(&srq->lock, flags);
static const struct ib_device_ops hns_roce_v2_dev_ops = {
	.destroy_qp = hns_roce_v2_destroy_qp,
	.modify_cq = hns_roce_v2_modify_cq,
	.poll_cq = hns_roce_v2_poll_cq,
	.post_recv = hns_roce_v2_post_recv,
	.post_send = hns_roce_v2_post_send,
	.query_qp = hns_roce_v2_query_qp,
	.req_notify_cq = hns_roce_v2_req_notify_cq,
};

static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
	.modify_srq = hns_roce_v2_modify_srq,
	.post_srq_recv = hns_roce_v2_post_srq_recv,
	.query_srq = hns_roce_v2_query_srq,
};

static const struct hns_roce_hw hns_roce_hw_v2 = {
	.cmq_init = hns_roce_v2_cmq_init,
	.cmq_exit = hns_roce_v2_cmq_exit,
	.hw_profile = hns_roce_v2_profile,
	.hw_init = hns_roce_v2_init,
	.hw_exit = hns_roce_v2_exit,
	.post_mbox = hns_roce_v2_post_mbox,
	.chk_mbox = hns_roce_v2_chk_mbox,
	.rst_prc_mbox = hns_roce_v2_rst_process_cmd,
	.set_gid = hns_roce_v2_set_gid,
	.set_mac = hns_roce_v2_set_mac,
	.write_mtpt = hns_roce_v2_write_mtpt,
	.rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
	.frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
	.mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
	.write_cqc = hns_roce_v2_write_cqc,
	.set_hem = hns_roce_v2_set_hem,
	.clear_hem = hns_roce_v2_clear_hem,
	.modify_qp = hns_roce_v2_modify_qp,
	.query_qp = hns_roce_v2_query_qp,
	.destroy_qp = hns_roce_v2_destroy_qp,
	.qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
	.modify_cq = hns_roce_v2_modify_cq,
	.post_send = hns_roce_v2_post_send,
	.post_recv = hns_roce_v2_post_recv,
	.req_notify_cq = hns_roce_v2_req_notify_cq,
	.poll_cq = hns_roce_v2_poll_cq,
	.init_eq = hns_roce_v2_init_eq_table,
	.cleanup_eq = hns_roce_v2_cleanup_eq_table,
	.write_srqc = hns_roce_v2_write_srqc,
	.modify_srq = hns_roce_v2_modify_srq,
	.query_srq = hns_roce_v2_query_srq,
	.post_srq_recv = hns_roce_v2_post_srq_recv,
	.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
	.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
};

static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);

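/* The PCI table above is only used to sanity-check the backing device in
 * hns_roce_hw_v2_get_cfg(); the RoCE driver does not bind to PCI itself but
 * attaches to the hns3 NIC through the hnae3 client interface registered at
 * module init below.
 */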
static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
				  struct hnae3_handle *handle)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	const struct pci_device_id *id;
	int i;

	id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
	if (!id) {
		dev_err(hr_dev->dev, "device is not compatible!\n");
		return -ENXIO;
	}

	hr_dev->hw = &hns_roce_hw_v2;
	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
	hr_dev->odb_offset = hr_dev->sdb_offset;

	/* Get info from NIC driver. */
	hr_dev->reg_base = handle->rinfo.roce_io_base;
	hr_dev->caps.num_ports = 1;
	hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
	hr_dev->iboe.phy_port[0] = 0;

	addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
			    hr_dev->iboe.netdevs[0]->dev_addr);

	for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
		hr_dev->irq[i] = pci_irq_vector(handle->pdev,
						i + handle->rinfo.base_vector);

	/* cmd issue mode: 0 is poll, 1 is event */
	hr_dev->cmd_mod = 1;
	hr_dev->loop_idc = 0;

	hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
	priv->handle = handle;

	return 0;
}

static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev;
	int ret;

	hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
	if (!hr_dev)
		return -ENOMEM;

	hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
	if (!hr_dev->priv) {
		ret = -ENOMEM;
		goto error_failed_kzalloc;
	}

	hr_dev->pci_dev = handle->pdev;
	hr_dev->dev = &handle->pdev->dev;

	ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
	if (ret) {
		dev_err(hr_dev->dev, "Get Configuration failed!\n");
		goto error_failed_get_cfg;
	}

	ret = hns_roce_init(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
		goto error_failed_get_cfg;
	}

	handle->priv = hr_dev;
	return 0;

error_failed_get_cfg:
	kfree(hr_dev->priv);

error_failed_kzalloc:
	ib_dealloc_device(&hr_dev->ib_dev);
	return ret;
}

static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
					     bool reset)
{
	struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;

	if (!hr_dev)
		return;

	handle->priv = NULL;
	hns_roce_exit(hr_dev);
	kfree(hr_dev->priv);
	ib_dealloc_device(&hr_dev->ib_dev);
}

static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	struct device *dev = &handle->pdev->dev;
	int ret;

	handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;

	if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
		goto reset_chk_err;
	}

	ret = __hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
		dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
		if (ops->ae_dev_resetting(handle) ||
		    ops->get_hw_reset_stat(handle))
			goto reset_chk_err;
		else
			return ret;
	}

	handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;
	return 0;

reset_chk_err:
	dev_err(dev, "Device is busy resetting, please retry later.\n");
	return -EBUSY;
}

static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
					   bool reset)
{
	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
		return;

	handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;
	__hns_roce_hw_v2_uninit_instance(handle, reset);
	handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
}

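/* Reset handling: the NIC driver notifies the RoCE client in stages,
 * HNAE3_DOWN_CLIENT -> HNAE3_UNINIT_CLIENT -> HNAE3_INIT_CLIENT. The
 * HNS_ROCE_RST_DIRECT_RETURN bit lets the later stages return early when the
 * instance was never fully initialized before the reset started.
 */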
static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev;
	struct ib_event event;

	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
		set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
		return 0;
	}

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
	clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);

	hr_dev = (struct hns_roce_dev *)handle->priv;
	if (!hr_dev)
		return 0;

	hr_dev->active = false;
	hr_dev->dis_db = true;

	event.event = IB_EVENT_DEVICE_FATAL;
	event.device = &hr_dev->ib_dev;
	event.element.port_num = 1;
	ib_dispatch_event(&event);

	return 0;
}

static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
{
	struct device *dev = &handle->pdev->dev;
	int ret;

	if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
			       &handle->rinfo.state)) {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		return 0;
	}

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;

	dev_info(dev, "In reset process RoCE client reinit.\n");
	ret = __hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		/* The HNAE3_INIT_CLIENT notification reinitializes the RoCE
		 * engine during reset; if the reinit fails, clear handle->priv
		 * so the NIC driver knows the RoCE client is gone.
		 */
		handle->priv = NULL;
		dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
	} else {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		dev_info(dev, "Reset done, RoCE client reinit finished.\n");
	}

	return ret;
}

static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
{
	if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
		return 0;

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
	dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
	__hns_roce_hw_v2_uninit_instance(handle, false);
	return 0;
}

static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
				       enum hnae3_reset_notify_type type)
{
	switch (type) {
	case HNAE3_DOWN_CLIENT:
		return hns_roce_hw_v2_reset_notify_down(handle);
	case HNAE3_INIT_CLIENT:
		return hns_roce_hw_v2_reset_notify_init(handle);
	case HNAE3_UNINIT_CLIENT:
		return hns_roce_hw_v2_reset_notify_uninit(handle);
	default:
		return 0;
	}
}

static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
	.init_instance = hns_roce_hw_v2_init_instance,
	.uninit_instance = hns_roce_hw_v2_uninit_instance,
	.reset_notify = hns_roce_hw_v2_reset_notify,
};

static struct hnae3_client hns_roce_hw_v2_client = {
	.name = "hns_roce_hw_v2",
	.type = HNAE3_CLIENT_ROCE,
	.ops = &hns_roce_hw_v2_ops,
};

static int __init hns_roce_hw_v2_init(void)
{
	return hnae3_register_client(&hns_roce_hw_v2_client);
}

static void __exit hns_roce_hw_v2_exit(void)
{
	hnae3_unregister_client(&hns_roce_hw_v2_client);
}

module_init(hns_roce_hw_v2_init);
module_exit(hns_roce_hw_v2_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");